{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T21:54:42Z","timestamp":1743026082698,"version":"3.40.3"},"publisher-location":"Cham","reference-count":53,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031732348"},{"type":"electronic","value":"9783031732355"}],"license":[{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73235-5_16","type":"book-chapter","created":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T06:01:53Z","timestamp":1727589713000},"page":"282-299","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Align Before Collaborate: Mitigating Feature Misalignment for\u00a0Robust Multi-agent 
Perception"],"prefix":"10.1007","author":[{"given":"Kun","family":"Yang","sequence":"first","affiliation":[]},{"given":"Dingkang","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Ke","family":"Li","sequence":"additional","affiliation":[]},{"given":"Dongling","family":"Xiao","sequence":"additional","affiliation":[]},{"given":"Zedian","family":"Shao","sequence":"additional","affiliation":[]},{"given":"Peng","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Song","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,30]]},"reference":[{"key":"16_CR1","doi-asserted-by":"crossref","unstructured":"Bai, X., et al.: Transfusion: robust lidar-camera fusion for 3D object detection with transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1090\u20131099 (2022)","DOI":"10.1109\/CVPR52688.2022.00116"},{"key":"16_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"16_CR3","doi-asserted-by":"crossref","unstructured":"Chen, Q., Ma, X., Tang, S., Guo, J., Yang, Q., Fu, S.: F-cooper: feature based cooperative perception for autonomous vehicle edge computing system using 3D point clouds. In: Proceedings of the 4th ACM\/IEEE Symposium on Edge Computing, pp. 
88\u2013100 (2019)","DOI":"10.1145\/3318216.3363300"},{"key":"16_CR4","doi-asserted-by":"crossref","unstructured":"Chen, Q., Tang, S., Yang, Q., Fu, S.: Cooper: cooperative perception for connected autonomous vehicles based on 3D point clouds. In: 2019 IEEE 39th International Conference on Distributed Computing Systems, pp. 514\u2013524. IEEE (2019)","DOI":"10.1109\/ICDCS.2019.00058"},{"key":"16_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Z., et al.: Autoalign: pixel-instance feature aggregation for multi-modal 3D object detection. arXiv preprint arXiv:2201.06493 (2022)","DOI":"10.24963\/ijcai.2022\/116"},{"key":"16_CR6","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"628","DOI":"10.1007\/978-3-031-20074-8_36","volume-title":"ECCV 2022","author":"Z Chen","year":"2022","unstructured":"Chen, Z., Li, Z., Zhang, S., Fang, L., Jiang, Q., Zhao, F.: Deformable feature aggregation for dynamic multi-modal 3D object detection. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13668, pp. 628\u2013644. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20074-8_36"},{"key":"16_CR7","unstructured":"Hu, Y., Fang, S., Lei, Z., Zhong, Y., Chen, S.: Where2comm: communication-efficient collaborative perception via spatial confidence maps. In: Advances in Neural Information Processing Systems, vol. 35, pp. 4874\u20134886 (2022)"},{"key":"16_CR8","doi-asserted-by":"crossref","unstructured":"Hu, Y., Lu, Y., Xu, R., Xie, W., Chen, S., Wang, Y.: Collaboration helps camera overtake lidar in 3D detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9243\u20139252 (2023)","DOI":"10.1109\/CVPR52729.2023.00892"},{"key":"16_CR9","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. 
In: International Conference on Learning Representations (2015)"},{"key":"16_CR10","doi-asserted-by":"crossref","unstructured":"Lang, A.H., Vora, S., Caesar, H., Zhou, L., Yang, J., Beijbom, O.: Pointpillars: fast encoders for object detection from point clouds. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12697\u201312705 (2019)","DOI":"10.1109\/CVPR.2019.01298"},{"key":"16_CR11","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"316","DOI":"10.1007\/978-3-031-19824-3_19","volume-title":"ECCV 2022","author":"Z Lei","year":"2022","unstructured":"Lei, Z., Ren, S., Hu, Y., Zhang, W., Chen, S.: Latency-aware collaborative perception. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13692, pp. 316\u2013332. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19824-3_19"},{"key":"16_CR12","doi-asserted-by":"crossref","unstructured":"Li, F., et al.: Lite DETR: an interleaved multi-scale encoder for efficient DETR. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18558\u201318567 (2023)","DOI":"10.1109\/CVPR52729.2023.01780"},{"key":"16_CR13","unstructured":"Li, Y., Ren, S., Wu, P., Chen, S., Feng, C., Zhang, W.: Learning distilled collaboration graph for multi-agent perception. In: Advances in Neural Information Processing Systems, vol. 34, pp. 29541\u201329552 (2021)"},{"key":"16_CR14","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/978-3-031-20077-9_1","volume-title":"ECCV 2022","author":"Z Li","year":"2022","unstructured":"Li, Z., et al.: Bevformer: learning bird\u2019s-eye-view representation from multi-camera images via spatiotemporal transformers. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13669, pp. 1\u201318. Springer, Cham (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-20077-9_1"},{"key":"16_CR15","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Goyal, P., Girshick, R., He, K., Doll\u00e1r, P.: Focal loss for dense object detection. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2980\u20132988 (2017)","DOI":"10.1109\/ICCV.2017.324"},{"key":"16_CR16","doi-asserted-by":"crossref","unstructured":"Liu, Y.C., Tian, J., Glaser, N., Kira, Z.: When2com: multi-agent perception via communication graph grouping. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4106\u20134115 (2020)","DOI":"10.1109\/CVPR42600.2020.00416"},{"key":"16_CR17","unstructured":"Lu, Y., Hu, Y., Zhong, Y., Wang, D., Chen, S., Wang, Y.: An extensible framework for open heterogeneous collaborative perception. arXiv preprint arXiv:2401.13964 (2024)"},{"key":"16_CR18","doi-asserted-by":"crossref","unstructured":"Lu, Y., et al.: Robust collaborative 3D object detection in presence of pose errors. In: 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 4812\u20134818. IEEE (2023)","DOI":"10.1109\/ICRA48891.2023.10160546"},{"key":"16_CR19","doi-asserted-by":"crossref","unstructured":"Misra, I., Girdhar, R., Joulin, A.: An end-to-end transformer model for 3D object detection. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2906\u20132917 (2021)","DOI":"10.1109\/ICCV48922.2021.00290"},{"key":"16_CR20","unstructured":"Paszke, A., et\u00a0al.: Pytorch: an imperative style, high-performance deep learning library. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"16_CR21","unstructured":"Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: deep learning on point sets for 3D classification and segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
652\u2013660 (2017)"},{"key":"16_CR22","doi-asserted-by":"publisher","first-page":"162","DOI":"10.1016\/j.trf.2016.07.012","volume":"42","author":"T Ram","year":"2016","unstructured":"Ram, T., Chand, K.: Effect of drivers\u2019 risk perception and perception of driving tasks on road safety attitude. Transport. Res. F: Traffic Psychol. Behav. 42, 162\u2013176 (2016)","journal-title":"Transport. Res. F: Traffic Psychol. Behav."},{"key":"16_CR23","doi-asserted-by":"crossref","unstructured":"Rawashdeh, Z.Y., Wang, Z.: Collaborative automated driving: a machine learning-based method to enhance the accuracy of shared information. In: 2018 21st International Conference on Intelligent Transportation Systems, pp. 3961\u20133966. IEEE (2018)","DOI":"10.1109\/ITSC.2018.8569832"},{"key":"16_CR24","doi-asserted-by":"crossref","unstructured":"Redmon, J., Divvala, S., Girshick, R., Farhadi, A.: You only look once: unified, real-time object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 779\u2013788 (2016)","DOI":"10.1109\/CVPR.2016.91"},{"key":"16_CR25","doi-asserted-by":"crossref","unstructured":"Sheng, H., et al.: Improving 3D object detection with channel-wise transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2743\u20132752 (2021)","DOI":"10.1109\/ICCV48922.2021.00274"},{"key":"16_CR26","doi-asserted-by":"crossref","unstructured":"Shi, S., et al.: VIPS: real-time perception fusion for infrastructure-assisted autonomous driving. In: Proceedings of the 28th Annual International Conference on Mobile Computing and Networking, pp. 133\u2013146 (2022)","DOI":"10.1145\/3495243.3560539"},{"key":"16_CR27","doi-asserted-by":"crossref","unstructured":"Wang, B., Zhang, L., Wang, Z., Zhao, Y., Zhou, T.: Core: cooperative reconstruction for multi-agent perception. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
8710\u20138720 (2023)","DOI":"10.1109\/ICCV51070.2023.00800"},{"key":"16_CR28","doi-asserted-by":"crossref","unstructured":"Wang, T., et al.: UMC: a unified bandwidth-efficient and multi-resolution based collaborative perception framework. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8187\u20138196 (2023)","DOI":"10.1109\/ICCV51070.2023.00752"},{"key":"16_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"605","DOI":"10.1007\/978-3-030-58536-5_36","volume-title":"Computer Vision \u2013 ECCV 2020","author":"T-H Wang","year":"2020","unstructured":"Wang, T.-H., Manivasagam, S., Liang, M., Yang, B., Zeng, W., Urtasun, R.: V2VNet: vehicle-to-vehicle communication for joint perception and prediction. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12347, pp. 605\u2013621. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58536-5_36"},{"key":"16_CR30","unstructured":"Wang, Z., et al.: VIMI: vehicle-infrastructure multi-view intermediate fusion for camera-based 3D object detection. arXiv preprint arXiv:2303.10975 (2023)"},{"key":"16_CR31","unstructured":"Wei, S., et al.: Robust asynchronous collaborative 3D detection via bird\u2019s eye view flow. arXiv preprint arXiv:2309.16940 (2023)"},{"key":"16_CR32","unstructured":"Xizhou, Z., Weijie, S., Lewei, L., Bin, L., Xiaogang, W., Jifeng, D.: Deformable DETR: deformable transformers for end-to-end object detection. In: International Conference on Learning Representations (2021)"},{"key":"16_CR33","doi-asserted-by":"crossref","unstructured":"Xu, R., Chen, W., Xiang, H., Xia, X., Liu, L., Ma, J.: Model-agnostic multi-agent perception framework. In: 2023 IEEE International Conference on Robotics and Automation, pp. 1471\u20131478. 
IEEE (2023)","DOI":"10.1109\/ICRA48891.2023.10161460"},{"key":"16_CR34","unstructured":"Xu, R., Tu, Z., Xiang, H., Shao, W., Zhou, B., Ma, J.: Cobevt: cooperative bird\u2019s eye view semantic segmentation with sparse transformers. In: Conference on Robot Learning (2022)"},{"key":"16_CR35","doi-asserted-by":"crossref","unstructured":"Xu, R., et\u00a0al.: V2v4real: a real-world large-scale dataset for vehicle-to-vehicle cooperative perception. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13712\u201313722 (2023)","DOI":"10.1109\/CVPR52729.2023.01318"},{"key":"16_CR36","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"107","DOI":"10.1007\/978-3-031-19842-7_7","volume-title":"ECCV 2022","author":"R Xu","year":"2022","unstructured":"Xu, R., Xiang, H., Tu, Z., Xia, X., Yang, M.H., Ma, J.: V2X-ViT: vehicle-to-everything cooperative perception with vision transformer. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13699, pp. 107\u2013124. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19842-7_7"},{"key":"16_CR37","doi-asserted-by":"crossref","unstructured":"Xu, R., Xiang, H., Xia, X., Han, X., Li, J., Ma, J.: Opv2v: an open benchmark dataset and fusion pipeline for perception with vehicle-to-vehicle communication. In: Proceedings of the International Conference on Robotics and Automation, pp. 2583\u20132589. IEEE (2022)","DOI":"10.1109\/ICRA46639.2022.9812038"},{"key":"16_CR38","doi-asserted-by":"crossref","unstructured":"Yang, D., Huang, S., Kuang, H., Du, Y., Zhang, L.: Disentangled representation learning for multimodal emotion recognition. In: Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), pp. 
1642\u20131651 (2022)","DOI":"10.1145\/3503161.3547754"},{"key":"16_CR39","doi-asserted-by":"crossref","unstructured":"Yang, D., et al.: Aide: a vision-driven multi-view, multi-modal, multi-tasking dataset for assistive driving perception. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 20459\u201320470 (2023)","DOI":"10.1109\/ICCV51070.2023.01871"},{"key":"16_CR40","doi-asserted-by":"crossref","unstructured":"Yang, D., Yang, K., Li, M., Wang, S., Wang, S., Zhang, L.: Robust emotion recognition in context debiasing. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 12447\u201312457 (2024)","DOI":"10.1109\/CVPR52733.2024.01183"},{"key":"16_CR41","unstructured":"Yang, D., et al.: How2comm: communication-efficient and collaboration-pragmatic multi-agent perception. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"16_CR42","doi-asserted-by":"crossref","unstructured":"Yang, K., et al.: A novel efficient multi-view traffic-related object detection framework. In: IEEE International Conference on Acoustics, Speech and Signal Processing, pp.\u00a01\u20135 (2023)","DOI":"10.1109\/ICASSP49357.2023.10095027"},{"key":"16_CR43","doi-asserted-by":"crossref","unstructured":"Yang, K., Sun, P., Lin, J., Boukerche, A., Song, L.: A novel distributed task scheduling framework for supporting vehicular edge intelligence. In: 2022 IEEE 42nd International Conference on Distributed Computing Systems (ICDCS), pp. 972\u2013982. IEEE (2022)","DOI":"10.1109\/ICDCS54860.2022.00098"},{"key":"16_CR44","doi-asserted-by":"publisher","DOI":"10.1016\/j.adhoc.2023.103343","volume":"153","author":"K Yang","year":"2024","unstructured":"Yang, K., Sun, P., Yang, D., Lin, J., Boukerche, A., Song, L.: A novel hierarchical distributed vehicular edge computing framework for supporting intelligent driving. Ad Hoc Netw. 
153, 103343 (2024)","journal-title":"Ad Hoc Netw."},{"key":"16_CR45","doi-asserted-by":"crossref","unstructured":"Yang, K., et al.: Spatio-temporal domain awareness for multi-agent collaborative perception. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 23383\u201323392 (2023)","DOI":"10.1109\/ICCV51070.2023.02137"},{"key":"16_CR46","doi-asserted-by":"crossref","unstructured":"Yang, K., Yang, D., Zhang, J., Wang, H., Sun, P., Song, L.: What2comm: towards communication-efficient collaborative perception via feature decoupling. In: Proceedings of the 31st ACM International Conference on Multimedia (ACM MM), pp. 7686\u20137695 (2023)","DOI":"10.1145\/3581783.3611699"},{"key":"16_CR47","doi-asserted-by":"crossref","unstructured":"Yu, H., et\u00a0al.: Dair-v2x: a large-scale dataset for vehicle-infrastructure cooperative 3D object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 21361\u201321370 (2022)","DOI":"10.1109\/CVPR52688.2022.02067"},{"key":"16_CR48","unstructured":"Yu, H., Tang, Y., Xie, E., Mao, J., Luo, P., Nie, Z.: Flow-based feature fusion for vehicle-infrastructure cooperative 3D object detection. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"16_CR49","doi-asserted-by":"crossref","unstructured":"Yu, H., et\u00a0al.: V2x-seq: a large-scale sequential dataset for vehicle-infrastructure cooperative perception and forecasting. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5486\u20135495 (2023)","DOI":"10.1109\/CVPR52729.2023.00531"},{"key":"16_CR50","doi-asserted-by":"crossref","unstructured":"Yuan, X., Kortylewski, A., Sun, Y., Yuille, A.: Robust instance segmentation through reasoning about multi-object occlusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
11141\u201311150 (2021)","DOI":"10.1109\/CVPR46437.2021.01099"},{"issue":"4","key":"16_CR51","doi-asserted-by":"publisher","first-page":"2068","DOI":"10.1109\/TCSVT.2021.3082763","volume":"32","author":"Z Yuan","year":"2021","unstructured":"Yuan, Z., Song, X., Bai, L., Wang, Z., Ouyang, W.: Temporal-channel transformer for 3D lidar-based video object detection for autonomous driving. IEEE Trans. Circuits Syst. Video Technol. 32(4), 2068\u20132078 (2021)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"16_CR52","doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., Jia, J.: Pyramid scene parsing network. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2881\u20132890 (2017)","DOI":"10.1109\/CVPR.2017.660"},{"key":"16_CR53","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"496","DOI":"10.1007\/978-3-031-19839-7_29","volume-title":"ECCV 2022","author":"Z Zhou","year":"2022","unstructured":"Zhou, Z., Zhao, X., Wang, Y., Wang, P., Foroosh, H.: Centerformer: center-based transformer for 3D object detection. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13698, pp. 496\u2013513. Springer, Cham (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-19839-7_29"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73235-5_16","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T06:16:01Z","timestamp":1727590561000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73235-5_16"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,30]]},"ISBN":["9783031732348","9783031732355"],"references-count":53,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73235-5_16","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,9,30]]},"assertion":[{"value":"30 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}