{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T12:29:17Z","timestamp":1743078557113,"version":"3.40.3"},"publisher-location":"Cham","reference-count":65,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031729799"},{"type":"electronic","value":"9783031729805"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-72980-5_17","type":"book-chapter","created":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:15:43Z","timestamp":1730106943000},"page":"288-304","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["HaloQuest: A Visual Hallucination Dataset for Advancing Multimodal Reasoning"],"prefix":"10.1007","author":[{"given":"Zhecan","family":"Wang","sequence":"first","affiliation":[]},{"given":"Garrett","family":"Bingham","sequence":"additional","affiliation":[]},{"given":"Adams Wei","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Quoc V.","family":"Le","sequence":"additional","affiliation":[]},{"given":"Thang","family":"Luong","sequence":"additional","affiliation":[]},{"given":"Golnaz","family":"Ghiasi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,29]]},"reference":[{"key":"17_CR1","unstructured":"GPT-4v(ision) system card (2023). https:\/\/api.semanticscholar.org\/CorpusID:263218031"},{"key":"17_CR2","unstructured":"Midjourney (2023). https:\/\/midjourney.com\/"},{"key":"17_CR3","doi-asserted-by":"publisher","unstructured":"Alkaissi, H., Mcfarlane, S.: Artificial hallucinations in ChatGPT: implications in scientific writing. Cureus 15 (2023). https:\/\/doi.org\/10.7759\/cureus.35179","DOI":"10.7759\/cureus.35179"},{"key":"17_CR4","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"17_CR5","unstructured":"Awadalla, A., et\u00a0al.: OpenFlamingo: an open-source framework for training large autoregressive vision-language models. arXiv preprint arXiv:2308.01390 (2023)"},{"key":"17_CR6","unstructured":"Banerjee, S., Lavie, A.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: Goldstein, J., Lavie, A., Lin, C.Y., Voss, C. (eds.) Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 65\u201372. Association for Computational Linguistics, Ann Arbor (2005). https:\/\/aclanthology.org\/W05-0909"},{"key":"17_CR7","doi-asserted-by":"crossref","unstructured":"Bang, Y., et\u00a0al.: A multitask, multilingual, multimodal evaluation of ChatGPT on reasoning, hallucination, and interactivity. 
arXiv preprint arXiv:2302.04023 (2023)","DOI":"10.18653\/v1\/2023.ijcnlp-main.45"},{"key":"17_CR8","doi-asserted-by":"crossref","unstructured":"Bender, E.M., Gebru, T., McMillan-Major, A., Shmitchell, S.: On the dangers of stochastic parrots: can language models be too big? In: Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, pp. 610\u2013623 (2021)","DOI":"10.1145\/3442188.3445922"},{"key":"17_CR9","doi-asserted-by":"crossref","unstructured":"Biten, A.F., G\u00f3mez, L., Karatzas, D.: Let there be a clock on the beach: reducing object hallucination in image captioning. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 1381\u20131390 (2022)","DOI":"10.1109\/WACV51458.2022.00253"},{"key":"17_CR10","doi-asserted-by":"crossref","unstructured":"Bitton-Guetta, N., et al.: Breaking common sense: whoops! A vision-and-language benchmark of synthetic and compositional images. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2616\u20132627 (2023)","DOI":"10.1109\/ICCV51070.2023.00247"},{"key":"17_CR11","unstructured":"Chiang, W.L., et\u00a0al.: Vicuna: an open-source chatbot impressing GPT-4 with 90%* ChatGPT quality (2023). https:\/\/vicunalmsys.org. Accessed 14 Apr 2023"},{"key":"17_CR12","unstructured":"Cui, C., et al.: Holistic analysis of hallucination in GPT-4V (ision): bias and interference challenges. arXiv preprint arXiv:2311.03287 (2023)"},{"key":"17_CR13","doi-asserted-by":"crossref","unstructured":"Dai, W., Liu, Z., Ji, Z., Su, D., Fung, P.: Plausible may not be faithful: probing object hallucination in vision-language pre-training. arXiv preprint arXiv:2210.07688 (2022)","DOI":"10.18653\/v1\/2023.eacl-main.156"},{"key":"17_CR14","doi-asserted-by":"crossref","unstructured":"Deng, J., Chan, G., Zhong, H., Lu, C.X.: See beyond seeing: robust 3D object detection from point clouds via cross-modal hallucination. arXiv preprint arXiv:2309.17336 (2023)","DOI":"10.1109\/ICRA57147.2024.10610775"},{"key":"17_CR15","doi-asserted-by":"crossref","unstructured":"Goyal, Y., Khot, T., Summers-Stay, D., Batra, D., Parikh, D.: Making the V in VQA matter: elevating the role of image understanding in visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6904\u20136913 (2017)","DOI":"10.1109\/CVPR.2017.670"},{"key":"17_CR16","doi-asserted-by":"crossref","unstructured":"Gunjal, A., Yin, J., Bas, E.: Detecting and preventing hallucinations in large vision language models. arXiv preprint arXiv:2308.06394 (2023)","DOI":"10.1609\/aaai.v38i16.29771"},{"issue":"12","key":"17_CR17","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3571730","volume":"55","author":"Z Ji","year":"2023","unstructured":"Ji, Z., et al.: Survey of hallucination in natural language generation. ACM Comput. Surv. 55(12), 1\u201338 (2023)","journal-title":"ACM Comput. Surv."},{"key":"17_CR18","doi-asserted-by":"crossref","unstructured":"Jiang, C., et al.: Hal-eval: a universal and fine-grained hallucination evaluation framework for large vision language models. arXiv preprint arXiv:2402.15721 (2024)","DOI":"10.1145\/3664647.3680576"},{"key":"17_CR19","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1007\/s11263-016-0981-7","volume":"123","author":"R Krishna","year":"2017","unstructured":"Krishna, R., et al.: Visual genome: connecting language and vision using crowdsourced dense image annotations. Int. J. Comput. 
Vision 123, 32\u201373 (2017)","journal-title":"Int. J. Comput. Vision"},{"issue":"7","key":"17_CR20","doi-asserted-by":"publisher","first-page":"1956","DOI":"10.1007\/s11263-020-01316-z","volume":"128","author":"A Kuznetsova","year":"2020","unstructured":"Kuznetsova, A., et al.: The open images dataset v4: unified image classification, object detection, and visual relationship detection at scale. Int. J. Comput. Vision 128(7), 1956\u20131981 (2020)","journal-title":"Int. J. Comput. Vision"},{"key":"17_CR21","doi-asserted-by":"crossref","unstructured":"Lee, S., Park, S.H., Jo, Y., Seo, M.: Volcano: mitigating multimodal hallucination through self-feedback guided revision. arXiv preprint arXiv:2311.07362 (2023)","DOI":"10.18653\/v1\/2024.naacl-long.23"},{"key":"17_CR22","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023)"},{"key":"17_CR23","doi-asserted-by":"crossref","unstructured":"Li, J., Cheng, X., Zhao, W.X., Nie, J.Y., Wen, J.R.: HaluEval: a large-scale hallucination evaluation benchmark for large language models. In: Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 6449\u20136464 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.397"},{"key":"17_CR24","doi-asserted-by":"crossref","unstructured":"Li, Y., Du, Y., Zhou, K., Wang, J., Zhao, W.X., Wen, J.R.: Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"key":"17_CR25","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381. Association for Computational Linguistics, Barcelona (2004). https:\/\/aclanthology.org\/W04-1013"},{"key":"17_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014, Part V. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"17_CR27","unstructured":"Liu, F., et al.: HallusionBench: you see what you think? or you think what you see? An image-context reasoning benchmark challenging for GPT-4V (ision), LLaVA-1.5, and other multi-modality models. arXiv preprint arXiv:2310.14566 (2023)"},{"key":"17_CR28","unstructured":"Liu, F., Lin, K., Li, L., Wang, J., Yacoob, Y., Wang, L.: Aligning large multi-modal model with robust instruction tuning. arXiv preprint arXiv:2306.14565 (2023)"},{"key":"17_CR29","unstructured":"Liu, H., et al.: A survey on hallucination in large vision-language models. arXiv preprint arXiv:2402.00253 (2024)"},{"key":"17_CR30","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning. arXiv preprint arXiv:2310.03744 (2023)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"17_CR31","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. Adv. Neural Inf. Process. Syst. 
36 (2024)"},{"key":"17_CR32","doi-asserted-by":"crossref","unstructured":"Lovenia, H., Dai, W., Cahyawijaya, S., Ji, Z., Fung, P.: Negative object presence evaluation (nope) to measure object hallucination in vision-language models. arXiv preprint arXiv:2310.05338 (2023)","DOI":"10.18653\/v1\/2024.alvr-1.4"},{"key":"17_CR33","doi-asserted-by":"crossref","unstructured":"Marino, K., Rastegari, M., Farhadi, A., Mottaghi, R.: Ok-VQA: a visual question answering benchmark requiring external knowledge. In: Conference on Computer Vision and Pattern Recognition (CVPR) (2019)","DOI":"10.1109\/CVPR.2019.00331"},{"key":"17_CR34","unstructured":"Muhovi\u010d, J., Koporec, G., Per\u0161, J.: Hallucinating hidden obstacles for unmanned surface vehicles using a compositional model (2023)"},{"key":"17_CR35","unstructured":"Pan, J., et al.: JourneyDB: a benchmark for generative image understanding (2023)"},{"key":"17_CR36","doi-asserted-by":"publisher","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation (2002). https:\/\/doi.org\/10.3115\/1073083.1073135","DOI":"10.3115\/1073083.1073135"},{"key":"17_CR37","doi-asserted-by":"publisher","unstructured":"Park, J.S., Xiao, X., Warnell, G., Yedidsion, H., Stone, P.: Learning perceptual hallucination for multi-robot navigation in narrow hallways. In: 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 10033\u201310039 (2023). https:\/\/doi.org\/10.1109\/ICRA48891.2023.10161327","DOI":"10.1109\/ICRA48891.2023.10161327"},{"key":"17_CR38","unstructured":"Peng, D.: Langfun (2023). https:\/\/github.com\/google\/langfun"},{"key":"17_CR39","unstructured":"Qian, Y., Zhang, H., Yang, Y., Gan, Z.: How easy is it to fool your multimodal LLMs? An empirical analysis on deceptive prompts. arXiv preprint arXiv:2402.13220 (2024)"},{"key":"17_CR40","unstructured":"Rawte, V., Sheth, A., Das, A.: A survey of hallucination in large foundation models. arXiv preprint arXiv:2309.05922 (2023)"},{"key":"17_CR41","unstructured":"Reid, M., et\u00a0al.: Gemini 1.5: unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530 (2024)"},{"key":"17_CR42","doi-asserted-by":"crossref","unstructured":"Rohrbach, A., Hendricks, L.A., Burns, K., Darrell, T., Saenko, K.: Object hallucination in image captioning. In: Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 4035\u20134045 (2018)","DOI":"10.18653\/v1\/D18-1437"},{"key":"17_CR43","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"17_CR44","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"146","DOI":"10.1007\/978-3-031-20074-8_9","volume-title":"Computer Vision \u2013 ECCV 2022","author":"D Schwenk","year":"2022","unstructured":"Schwenk, D., Khandelwal, A., Clark, C., Marino, K., Mottaghi, R.: A-OKVQA: a benchmark for visual question answering using world knowledge. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022, Part VIII. LNCS, vol. 13668, pp. 146\u2013162. Springer, Cham (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-20074-8_9"},{"key":"17_CR45","doi-asserted-by":"publisher","first-page":"e4483","DOI":"10.1136\/bmj.e4483","volume":"345","author":"P Sedgwick","year":"2012","unstructured":"Sedgwick, P.: Pearson\u2019s correlation coefficient. BMJ 345, e4483\u2013e4483 (2012). https:\/\/doi.org\/10.1136\/bmj.e4483","journal-title":"BMJ"},{"key":"17_CR46","doi-asserted-by":"crossref","unstructured":"Sun, Z., et\u00a0al.: Aligning large multimodal models with factually augmented RLHF. arXiv preprint arXiv:2309.14525 (2023)","DOI":"10.18653\/v1\/2024.findings-acl.775"},{"key":"17_CR47","unstructured":"Umapathi, L.K., Pal, A., Sankarasubbu, M.: Med-HALT: medical domain hallucination test for large language models. arXiv preprint arXiv:2307.15343 (2023)"},{"key":"17_CR48","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence\u00a0Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4566\u20134575 (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"17_CR49","unstructured":"Wang, B., et\u00a0al.: VIGC: visual instruction generation and correction. arXiv preprint arXiv:2308.12714 (2023)"},{"key":"17_CR50","doi-asserted-by":"publisher","unstructured":"Wang, H., Wu, W., Dou, Z., He, L., Yang, L.: Performance and exploration of ChatGPT in medical examination, records and education in Chinese: pave the way for medical AI. Int. J. Med. Inform. 177, 105173 (2023). https:\/\/doi.org\/10.1016\/j.ijmedinf.2023.105173, https:\/\/www.sciencedirect.com\/science\/article\/pii\/S1386505623001910","DOI":"10.1016\/j.ijmedinf.2023.105173"},{"key":"17_CR51","unstructured":"Wang, J., et\u00a0al.: Evaluation and analysis of hallucination in large vision-language models. arXiv preprint arXiv:2308.15126 (2023)"},{"key":"17_CR52","unstructured":"Wang, P., et al.: OFA: unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In: International Conference on Machine Learning, pp. 23318\u201323340. PMLR (2022)"},{"key":"17_CR53","doi-asserted-by":"crossref","unstructured":"Wang, W., et\u00a0al.: Image as a foreign language: BEiT pretraining for all vision and vision-language tasks. arXiv preprint arXiv:2208.10442 (2022)","DOI":"10.1109\/CVPR52729.2023.01838"},{"key":"17_CR54","doi-asserted-by":"crossref","unstructured":"Wang, Z.J., Montoya, E., Munechika, D., Yang, H., Hoover, B., Chau, D.H.: DiffusionDB: a large-scale prompt gallery dataset for text-to-image generative models. arXiv:2210.14896 (2022)","DOI":"10.18653\/v1\/2023.acl-long.51"},{"key":"17_CR55","unstructured":"Ye, Q., et\u00a0al.: mPLUG-owl: modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178 (2023)"},{"key":"17_CR56","doi-asserted-by":"crossref","unstructured":"Ye, Q., et al.: mPLUG-owl2: revolutionizing multi-modal large language model with modality collaboration. arXiv preprint arXiv:2311.04257 (2023)","DOI":"10.1109\/CVPR52733.2024.01239"},{"key":"17_CR57","unstructured":"Yin, S., et al.: WoodPecker: hallucination correction for multimodal large language models. arXiv preprint arXiv:2310.16045 (2023)"},{"key":"17_CR58","doi-asserted-by":"crossref","unstructured":"You, H., et al.: IdealGPT: iteratively decomposing vision and language reasoning via large language models. 
arXiv preprint arXiv:2305.14985 (2023)","DOI":"10.18653\/v1\/2023.findings-emnlp.755"},{"key":"17_CR59","unstructured":"Yu, J., et\u00a0al.: Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, vol. 2, no. 3, p. 5 (2022)"},{"key":"17_CR60","doi-asserted-by":"crossref","unstructured":"Zellers, R., Bisk, Y., Farhadi, A., Choi, Y.: From recognition to cognition: Visual commonsense reasoning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6720\u20136731 (2019)","DOI":"10.1109\/CVPR.2019.00688"},{"key":"17_CR61","unstructured":"Zhai, B., et al.: HallE-switch: rethinking and controlling object existence hallucinations in large vision language models for detailed caption. arXiv preprint arXiv:2310.01779 (2023)"},{"key":"17_CR62","doi-asserted-by":"crossref","unstructured":"Zhang, Q., Zhang, J., Xu, Y., Tao, D.: Vision transformer with quadrangle attention. IEEE Trans. Pattern Anal. Mach. Intell. (2024)","DOI":"10.1109\/TPAMI.2023.3347693"},{"key":"17_CR63","unstructured":"Zhou, Y., et al.: Analyzing and mitigating object hallucination in large vision-language models. arXiv preprint arXiv:2310.00754 (2023)"},{"key":"17_CR64","unstructured":"Zhu, D., Chen, J., Haydarov, K., Shen, X., Zhang, W., Elhoseiny, M.: ChatGPT asks, BLIP-2 answers: automatic questioning towards enriched visual descriptions. arXiv preprint arXiv:2303.06594 (2023)"},{"key":"17_CR65","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: MiniGPT-4: enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72980-5_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:21:31Z","timestamp":1730107291000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72980-5_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031729799","9783031729805"],"references-count":65,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72980-5_17","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"29 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 
2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}