{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T04:09:21Z","timestamp":1727669361645},"publisher-location":"Cham","reference-count":56,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031727832","type":"print"},{"value":"9783031727849","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72784-9_27","type":"book-chapter","created":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T07:01:50Z","timestamp":1727593310000},"page":"485-503","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["SafaRi: Adaptive Sequence Transformer for\u00a0Weakly Supervised Referring Expression Segmentation"],"prefix":"10.1007","author":[{"given":"Sayan","family":"Nag","sequence":"first","affiliation":[]},{"given":"Koustava","family":"Goswami","sequence":"additional","affiliation":[]},{"given":"Srikrishna","family":"Karanam","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,30]]},"reference":[{"key":"27_CR1","doi-asserted-by":"crossref","unstructured":"Bolya, D., Zhou, C., Xiao, F., Lee, Y.J.: YOLACT: real-time instance segmentation. In: ICCV, pp. 9157\u20139166 (2019)","DOI":"10.1109\/ICCV.2019.00925"},{"key":"27_CR2","doi-asserted-by":"crossref","unstructured":"Botach, A., Zheltonozhskii, E., Baskin, C.: End-to-end referring video object segmentation with multimodal transformers. In: CVPR, pp. 4985\u20134995 (2022)","DOI":"10.1109\/CVPR52688.2022.00493"},{"key":"27_CR3","doi-asserted-by":"crossref","unstructured":"Chen, D.J., Jia, S., Lo, Y.C., Chen, H.T., Liu, T.L.: See-through-text grouping for referring image segmentation. In: ICCV, pp. 7454\u20137463 (2019)","DOI":"10.1109\/ICCV.2019.00755"},{"key":"27_CR4","doi-asserted-by":"crossref","unstructured":"Chen, K., et\u00a0al.: Hybrid task cascade for instance segmentation. In: CVPR, pp. 4974\u20134983 (2019)","DOI":"10.1109\/CVPR.2019.00511"},{"key":"27_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"104","DOI":"10.1007\/978-3-030-58577-8_7","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y-C Chen","year":"2020","unstructured":"Chen, Y.-C., et al.: UNITER: UNiversal image-TExt representation learning. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 104\u2013120. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_7"},{"key":"27_CR6","doi-asserted-by":"crossref","unstructured":"Dai, J., He, K., Sun, J.: Instance-aware semantic segmentation via multi-task network cascades. In: CVPR, pp. 3150\u20133158 (2016)","DOI":"10.1109\/CVPR.2016.343"},{"key":"27_CR7","doi-asserted-by":"crossref","unstructured":"Ding, H., Liu, C., Wang, S., Jiang, X.: Vision-language transformer and query generation for referring segmentation. In: ICCV, pp. 16321\u201316330 (2021)","DOI":"10.1109\/ICCV48922.2021.01601"},{"key":"27_CR8","doi-asserted-by":"crossref","unstructured":"Dou, Z.Y., et\u00a0al.: An empirical study of training end-to-end vision-and-language transformers. In: CVPR, pp. 18166\u201318176 (2022)","DOI":"10.1109\/CVPR52688.2022.01763"},{"key":"27_CR9","doi-asserted-by":"crossref","unstructured":"Feng, G., Hu, Z., Zhang, L., Lu, H.: Encoder fusion network with co-attention embedding for referring image segmentation. In: CVPR, pp. 15506\u201315515 (2021)","DOI":"10.1109\/CVPR46437.2021.01525"},{"key":"27_CR10","doi-asserted-by":"crossref","unstructured":"Gavrilyuk, K., Ghodrati, A., Li, Z., Snoek, C.G.: Actor and action video segmentation from a sentence. In: CVPR, pp. 5958\u20135966 (2018)","DOI":"10.1109\/CVPR.2018.00624"},{"key":"27_CR11","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: ICCV, pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"27_CR12","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"108","DOI":"10.1007\/978-3-319-46448-0_7","volume-title":"Computer Vision \u2013 ECCV 2016","author":"R Hu","year":"2016","unstructured":"Hu, R., Rohrbach, M., Darrell, T.: Segmentation from natural language expressions. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9905, pp. 108\u2013124. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46448-0_7"},{"key":"27_CR13","doi-asserted-by":"crossref","unstructured":"Hu, Z., Feng, G., Sun, J., Zhang, L., Lu, H.: Bi-directional relationship inferring network for referring image segmentation. In: CVPR, pp. 4424\u20134433 (2020)","DOI":"10.1109\/CVPR42600.2020.00448"},{"key":"27_CR14","doi-asserted-by":"crossref","unstructured":"Huang, S., et al.: Referring image segmentation via cross-modal progressive comprehension. In: CVPR, pp. 10488\u201310497 (2020)","DOI":"10.1109\/CVPR42600.2020.01050"},{"key":"27_CR15","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"59","DOI":"10.1007\/978-3-030-58607-2_4","volume-title":"Computer Vision \u2013 ECCV 2020","author":"T Hui","year":"2020","unstructured":"Hui, T., et al.: Linguistic structure guided context modeling for referring image segmentation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12355, pp. 59\u201375. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58607-2_4"},{"key":"27_CR16","doi-asserted-by":"crossref","unstructured":"Jing, Y., Kong, T., Wang, W., Wang, L., Li, L., Tan, T.: Locate then segment: a strong pipeline for referring image segmentation. In: CVPR, pp. 9858\u20139867 (2021)","DOI":"10.1109\/CVPR46437.2021.00973"},{"key":"27_CR17","doi-asserted-by":"crossref","unstructured":"Kamath, A., Singh, M., LeCun, Y., Synnaeve, G., Misra, I., Carion, N.: MDETR-modulated detection for end-to-end multi-modal understanding. In: ICCV, pp. 1780\u20131790 (2021)","DOI":"10.1109\/ICCV48922.2021.00180"},{"key":"27_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"123","DOI":"10.1007\/978-3-030-20870-7_8","volume-title":"Computer Vision \u2013 ACCV 2018","author":"A Khoreva","year":"2019","unstructured":"Khoreva, A., Rohrbach, A., Schiele, B.: Video object segmentation with language referring expressions. In: Jawahar, C.V., Li, H., Mori, G., Schindler, K. (eds.) ACCV 2018. LNCS, vol. 11364, pp. 123\u2013141. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-20870-7_8"},{"key":"27_CR19","doi-asserted-by":"crossref","unstructured":"Kim, D., Kim, N., Lan, C., Kwak, S.: Shatter and gather: learning referring image segmentation with text supervision. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 15547\u201315557 (2023)","DOI":"10.1109\/ICCV51070.2023.01425"},{"key":"27_CR20","doi-asserted-by":"crossref","unstructured":"Kim, N., Kim, D., Lan, C., Zeng, W., Kwak, S.: Restr: convolution-free referring image segmentation using transformers. In: CVPR, pp. 18145\u201318154 (2022)","DOI":"10.1109\/CVPR52688.2022.01761"},{"key":"27_CR21","doi-asserted-by":"crossref","unstructured":"Lee, J., Lee, S., Nam, J., Yu, S., Do, J., Taghavi, T.: Weakly supervised referring image segmentation with intra-chunk and inter-chunk consistency. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 21870\u201321881 (2023)","DOI":"10.1109\/ICCV51070.2023.01999"},{"key":"27_CR22","unstructured":"Li, J., Selvaraju, R., Gotmare, A., Joty, S., Xiong, C., Hoi, S.C.H.: Align before fuse: vision and language representation learning with momentum distillation. In: NeurIPS (2021)"},{"key":"27_CR23","doi-asserted-by":"crossref","unstructured":"Li, L.H., et\u00a0al.: Grounded language-image pre-training. In: CVPR, pp. 10965\u201310975 (2022)","DOI":"10.1109\/CVPR52688.2022.01069"},{"key":"27_CR24","unstructured":"Li, M., Sigal, L.: Referring transformer: a one-step approach to multi-task visual grounding. In: NeurIPS (2021)"},{"key":"27_CR25","doi-asserted-by":"crossref","unstructured":"Li, R., et al.: Referring image segmentation via recurrent refinement networks. In: CVPR, pp. 5745\u20135753 (2018)","DOI":"10.1109\/CVPR.2018.00602"},{"key":"27_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1007\/978-3-030-58577-8_8","volume-title":"Computer Vision \u2013 ECCV 2020","author":"X Li","year":"2020","unstructured":"Li, X., et al.: Oscar: object-semantics aligned pre-training for vision-language tasks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 121\u2013137. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_8"},{"key":"27_CR27","doi-asserted-by":"crossref","unstructured":"Liu, C., Lin, Z., Shen, X., Yang, J., Lu, X., Yuille, A.: Recurrent multimodal interaction for referring image segmentation. In: ICCV, pp. 1271\u20131280 (2017)","DOI":"10.1109\/ICCV.2017.143"},{"key":"27_CR28","doi-asserted-by":"crossref","unstructured":"Liu, F., et al.: Referring image segmentation using text supervision. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 22124\u201322134 (2023)","DOI":"10.1109\/ICCV51070.2023.02022"},{"key":"27_CR29","doi-asserted-by":"crossref","unstructured":"Liu, J., et al.: Polyformer: referring image segmentation as sequential polygon generation. In: CVPR, pp. 18653\u201318663 (2023)","DOI":"10.1109\/CVPR52729.2023.01789"},{"key":"27_CR30","doi-asserted-by":"crossref","unstructured":"Liu, S., Qi, L., Qin, H., Shi, J., Jia, J.: Path aggregation network for instance segmentation. In: CVPR, pp. 8759\u20138768 (2018)","DOI":"10.1109\/CVPR.2018.00913"},{"key":"27_CR31","unstructured":"Liu, Y., et al.: RoBERTa: a robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)"},{"key":"27_CR32","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: ICCV, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"27_CR33","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)"},{"key":"27_CR34","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: VilBERT: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In: NeurIPS (2019)"},{"key":"27_CR35","doi-asserted-by":"crossref","unstructured":"Lu, J., Goswami, V., Rohrbach, M., Parikh, D., Lee, S.: 12-in-1: multi-task vision and language representation learning. In: CVPR, pp. 10437\u201310446 (2020)","DOI":"10.1109\/CVPR42600.2020.01045"},{"key":"27_CR36","doi-asserted-by":"crossref","unstructured":"Mao, J., Huang, J., Toshev, A., Camburu, O., Yuille, A.L., Murphy, K.: Generation and comprehension of unambiguous object descriptions. In: CVPR, pp. 11\u201320 (2016)","DOI":"10.1109\/CVPR.2016.9"},{"key":"27_CR37","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"656","DOI":"10.1007\/978-3-030-01252-6_39","volume-title":"Computer Vision \u2013 ECCV 2018","author":"E Margffoy-Tuay","year":"2018","unstructured":"Margffoy-Tuay, E., P\u00e9rez, J.C., Botero, E., Arbel\u00e1ez, P.: Dynamic multimodal instance segmentation guided by natural language queries. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11215, pp. 656\u2013672. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01252-6_39"},{"key":"27_CR38","doi-asserted-by":"crossref","unstructured":"Milletari, F., Navab, N., Ahmadi, S.A.: V-net: fully convolutional neural networks for volumetric medical image segmentation. In: 3DV, pp. 565\u2013571. IEEE (2016)","DOI":"10.1109\/3DV.2016.79"},{"key":"27_CR39","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"792","DOI":"10.1007\/978-3-319-46493-0_48","volume-title":"Computer Vision \u2013 ECCV 2016","author":"VK Nagaraja","year":"2016","unstructured":"Nagaraja, V.K., Morariu, V.I., Davis, L.S.: Modeling context between objects for referring expression understanding. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 792\u2013807. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_48"},{"key":"27_CR40","doi-asserted-by":"crossref","unstructured":"Qu, M., Wu, Y., Wei, Y., Liu, W., Liang, X., Zhao, Y.: Learning to segment every referring object point by point. In: CVPR, pp. 3021\u20133030 (2023)","DOI":"10.1109\/CVPR52729.2023.00295"},{"key":"27_CR41","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763 (2021). https:\/\/proceedings.mlr.press\/v139\/radford21a.html"},{"key":"27_CR42","first-page":"12116","volume":"34","author":"M Raghu","year":"2021","unstructured":"Raghu, M., Unterthiner, T., Kornblith, S., Zhang, C., Dosovitskiy, A.: Do vision transformers see like convolutional neural networks? NeurIPS 34, 12116\u201312128 (2021)","journal-title":"NeurIPS"},{"key":"27_CR43","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. NeurIPS 28 (2015)"},{"key":"27_CR44","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"38","DOI":"10.1007\/978-3-030-01231-1_3","volume-title":"Computer Vision \u2013 ECCV 2018","author":"H Shi","year":"2018","unstructured":"Shi, H., Li, H., Meng, F., Wu, Q.: Key-word-aware network for referring expression image segmentation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11210, pp. 38\u201354. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01231-1_3"},{"key":"27_CR45","doi-asserted-by":"crossref","unstructured":"Shtedritski, A., Rupprecht, C., Vedaldi, A.: What does clip know about a red circle? Visual prompt engineering for VLMS. arXiv preprint arXiv:2304.06712 (2023)","DOI":"10.1109\/ICCV51070.2023.01101"},{"key":"27_CR46","unstructured":"Strudel, R., Laptev, I., Schmid, C.: Weakly-supervised segmentation of referring expressions. arXiv preprint arXiv:2205.04725 (2022)"},{"key":"27_CR47","doi-asserted-by":"crossref","unstructured":"Wang, Z., et al.: CRIS: clip-driven referring image segmentation. In: CVPR, pp. 11686\u201311695 (2022)","DOI":"10.1109\/CVPR52688.2022.01139"},{"key":"27_CR48","doi-asserted-by":"crossref","unstructured":"Wu, J., Jiang, Y., Sun, P., Yuan, Z., Luo, P.: Language as queries for referring video object segmentation. In: CVPR, pp. 4974\u20134984 (2022)","DOI":"10.1109\/CVPR52688.2022.00492"},{"key":"27_CR49","doi-asserted-by":"crossref","unstructured":"Yang, S., Xia, M., Li, G., Zhou, H.Y., Yu, Y.: Bottom-up shift and reasoning for referring image segmentation. In: CVPR, pp. 11266\u201311275 (2021)","DOI":"10.1109\/CVPR46437.2021.01111"},{"key":"27_CR50","doi-asserted-by":"crossref","unstructured":"Yang, Z., Wang, J., Tang, Y., Chen, K., Zhao, H., Torr, P.H.: LAVT: language-aware vision transformer for referring image segmentation. In: CVPR, pp. 18155\u201318165 (2022)","DOI":"10.1109\/CVPR52688.2022.01762"},{"key":"27_CR51","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"521","DOI":"10.1007\/978-3-031-20059-5_30","volume-title":"Computer Vision - ECCV 2022","author":"Z Yang","year":"2022","unstructured":"Yang, Z., et al.: UniTAB: unifying text and box outputs for grounded vision-language modeling. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13696, pp. 521\u2013539. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20059-5_30"},{"key":"27_CR52","doi-asserted-by":"crossref","unstructured":"Ye, L., Rochan, M., Liu, Z., Wang, Y.: Cross-modal self-attention network for referring image segmentation. In: CVPR, pp. 10502\u201310511 (2019)","DOI":"10.1109\/CVPR.2019.01075"},{"key":"27_CR53","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"69","DOI":"10.1007\/978-3-319-46475-6_5","volume-title":"Computer Vision \u2013 ECCV 2016","author":"L Yu","year":"2016","unstructured":"Yu, L., Poirson, P., Yang, S., Berg, A.C., Berg, T.L.: Modeling context in referring expressions. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9906, pp. 69\u201385. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46475-6_5"},{"key":"27_CR54","first-page":"36067","volume":"35","author":"H Zhang","year":"2022","unstructured":"Zhang, H., et al.: Glipv2: unifying localization and vision-language understanding. NeurIPS 35, 36067\u201336080 (2022)","journal-title":"NeurIPS"},{"key":"27_CR55","doi-asserted-by":"crossref","unstructured":"Zhang, P., et al.: VinVL: revisiting visual representations in vision-language models. In: CVPR, pp. 5579\u20135588 (2021)","DOI":"10.1109\/CVPR46437.2021.00553"},{"key":"27_CR56","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"598","DOI":"10.1007\/978-3-031-19833-5_35","volume-title":"Computer Vision - ECCV 2022","author":"C Zhu","year":"2022","unstructured":"Zhu, C., et al.: SeqTR: a simple yet universal network for visual grounding. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13695, pp. 598\u2013615. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19833-5_35"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72784-9_27","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T07:54:37Z","timestamp":1727596477000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72784-9_27"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,30]]},"ISBN":["9783031727832","9783031727849"],"references-count":56,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72784-9_27","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,30]]},"assertion":[{"value":"30 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}