{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T05:22:44Z","timestamp":1732684964710,"version":"3.28.2"},"publisher-location":"Cham","reference-count":59,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031730009","type":"print"},{"value":"9783031730016","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73001-6_11","type":"book-chapter","created":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T10:22:41Z","timestamp":1732616561000},"page":"183-200","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["HERGen: Elevating Radiology Report Generation with\u00a0Longitudinal Data"],"prefix":"10.1007","author":[{"ORCID":"http:\/\/orcid.org\/0000-0002-3313-6479","authenticated-orcid":false,"given":"Fuying","family":"Wang","sequence":"first","affiliation":[]},{"given":"Shenghui","family":"Du","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-9315-6527","authenticated-orcid":false,"given":"Lequan","family":"Yu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,27]]},"reference":[{"key":"11_CR1","doi-asserted-by":"publisher","DOI":"10.1016\/j.imu.2021.100557","volume":"24","author":"O Alfarghaly","year":"2021","unstructured":"Alfarghaly, O., Khaled, R., Elkorany, A., Helal, M., Fahmy, A.: Automated radiology report generation using conditioned transformers. Inform. Med. Unlocked 24, 100557 (2021)","journal-title":"Inform. Med. Unlocked"},{"key":"11_CR2","doi-asserted-by":"crossref","unstructured":"Alsentzer, E., Murphy, J.R., Boag, W., Weng, W.H., Jin, D., Naumann, T., McDermott, M.: Publicly available clinical BERT embeddings. arXiv preprint arXiv:1904.03323 (2019)","DOI":"10.18653\/v1\/W19-1909"},{"key":"11_CR3","unstructured":"Banerjee, S., Lavie, A.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 
65\u201372 (2005)"},{"key":"11_CR4","doi-asserted-by":"crossref","unstructured":"Bannur, S., et\u00a0al.: MS-CXR-T: learning to exploit temporal structure for biomedical vision-language processing (2023)","DOI":"10.1109\/CVPR52729.2023.01442"},{"key":"11_CR5","doi-asserted-by":"crossref","unstructured":"Bannur, S., et\u00a0al.: Learning to exploit temporal structure for biomedical vision-language processing. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15016\u201315027 (2023)","DOI":"10.1109\/CVPR52729.2023.01442"},{"key":"11_CR6","doi-asserted-by":"crossref","unstructured":"Beltagy, I., Lo, K., Cohan, A.: SciBERT: a pretrained language model for scientific text. arXiv preprint arXiv:1903.10676 (2019)","DOI":"10.18653\/v1\/D19-1371"},{"key":"11_CR7","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/978-3-031-20059-5_1","volume-title":"European Conference on Computer Vision 2022","author":"B Boecking","year":"2022","unstructured":"Boecking, B., et al.: Making the most of text semantics to improve biomedical vision-language processing. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13696, pp. 1\u201321. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20059-5_1"},{"issue":"1","key":"11_CR8","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1177\/08465371221117282","volume":"74","author":"DJ Cao","year":"2023","unstructured":"Cao, D.J., Hurrell, C., Patlas, M.N.: Current status of burnout in Canadian radiology. Can. Assoc. Radiol. J. 74(1), 37\u201343 (2023)","journal-title":"Can. Assoc. Radiol. J."},{"key":"11_CR9","doi-asserted-by":"crossref","unstructured":"Chen, Z., Shen, Y., Song, Y., Wan, X.: Cross-modal memory networks for radiology report generation. arXiv preprint arXiv:2204.13258 (2022)","DOI":"10.18653\/v1\/2021.acl-long.459"},{"key":"11_CR10","doi-asserted-by":"crossref","unstructured":"Chen, Z., Song, Y., Chang, T.H., Wan, X.: Generating radiology reports via memory-driven transformer. arXiv preprint arXiv:2010.16056 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.112"},{"key":"11_CR11","doi-asserted-by":"crossref","unstructured":"Cornia, M., Stefanini, M., Baraldi, L., Cucchiara, R.: Meshed-memory transformer for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10578\u201310587 (2020)","DOI":"10.1109\/CVPR42600.2020.01059"},{"key":"11_CR12","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"issue":"1","key":"11_CR13","first-page":"1","volume":"3","author":"Y Gu","year":"2021","unstructured":"Gu, Y., et al.: Domain-specific language model pretraining for biomedical natural language processing. ACM Trans. Comput. Healthc. (HEALTH) 3(1), 1\u201323 (2021)","journal-title":"ACM Trans. Comput. Healthc. (HEALTH)"},{"key":"11_CR14","doi-asserted-by":"crossref","unstructured":"Huang, L., Wang, W., Chen, J., Wei, X.Y.: Attention on attention for image captioning. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4634\u20134643 (2019)","DOI":"10.1109\/ICCV.2019.00473"},{"key":"11_CR15","doi-asserted-by":"crossref","unstructured":"Huang, S.C., Shen, L., Lungren, M.P., Yeung, S.: GLoRIA: a multimodal global-local representation learning framework for label-efficient medical image recognition. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3942\u20133951 (2021)","DOI":"10.1109\/ICCV48922.2021.00391"},{"key":"11_CR16","doi-asserted-by":"crossref","unstructured":"Huang, Z., Zhang, X., Zhang, S.: KiUT: knowledge-injected U-transformer for radiology report generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19809\u201319818 (2023)","DOI":"10.1109\/CVPR52729.2023.01897"},{"key":"11_CR17","doi-asserted-by":"crossref","unstructured":"Jing, B., Wang, Z., Xing, E.: Show, describe and conclude: on exploiting the structure information of chest X-ray reports. arXiv preprint arXiv:2004.12274 (2020)","DOI":"10.18653\/v1\/P19-1657"},{"key":"11_CR18","doi-asserted-by":"crossref","unstructured":"Jing, B., Xie, P., Xing, E.: On the automatic generation of medical imaging reports. arXiv preprint arXiv:1711.08195 (2017)","DOI":"10.18653\/v1\/P18-1240"},{"key":"11_CR19","unstructured":"Johnson, A., et al.: MIMIC-CXR-JPG-chest radiographs with structured labels. PhysioNet (2019)"},{"key":"11_CR20","doi-asserted-by":"publisher","first-page":"581","DOI":"10.1007\/978-3-031-16431-6_55","volume-title":"Medical Image Computing and Computer-Assisted Intervention","author":"G Karwande","year":"2022","unstructured":"Karwande, G., Mbakwe, A.B., Wu, J.T., Celi, L.A., Moradi, M., Lourentzou, I.: CheXRelNet: an anatomy-aware model for tracking longitudinal relationships between chest X-rays. In: Wang, L., Dou, Q., Fletcher, P.T., Speidel, S., Li, S. (eds.) MICCAI 2022. LNCS, vol. 13431, pp. 581\u2013591. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-16431-6_55"},{"key":"11_CR21","doi-asserted-by":"crossref","unstructured":"Li, M., Lin, B., Chen, Z., Lin, H., Liang, X., Chang, X.: Dynamic graph enhanced contrastive learning for chest X-ray report generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3334\u20133343 (2023)","DOI":"10.1109\/CVPR52729.2023.00325"},{"key":"11_CR22","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381 (2004)"},{"key":"11_CR23","doi-asserted-by":"crossref","unstructured":"Liu, F., Wu, X., Ge, S., Fan, W., Zou, Y.: Exploring and distilling posterior and prior knowledge for radiology report generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13753\u201313762 (2021)","DOI":"10.1109\/CVPR46437.2021.01354"},{"key":"11_CR24","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)"},{"key":"11_CR25","unstructured":"Ma, X., et al.: Contrastive attention for automatic chest X-ray report generation. arXiv preprint arXiv:2106.06965 (2021)"},{"key":"11_CR26","doi-asserted-by":"crossref","unstructured":"Miura, Y., Zhang, Y., Tsai, E.B., Langlotz, C.P., Jurafsky, D.: Improving factual completeness and consistency of image-to-text radiology report generation. arXiv preprint arXiv:2010.10042 (2020)","DOI":"10.18653\/v1\/2021.naacl-main.416"},{"key":"11_CR27","doi-asserted-by":"publisher","DOI":"10.1016\/j.artmed.2023.102633","volume":"144","author":"A Nicolson","year":"2023","unstructured":"Nicolson, A., Dowling, J., Koopman, B.: Improving chest X-ray report generation by leveraging warm starting. Artif. Intell. Med. 144, 102633 (2023)","journal-title":"Artif. Intell. 
Med."},{"key":"11_CR28","doi-asserted-by":"crossref","unstructured":"Nooralahzadeh, F., Gonzalez, N.P., Frauenfelder, T., Fujimoto, K., Krauthammer, M.: Progressive transformer-based generation of radiology reports. arXiv preprint arXiv:2102.09777 (2021)","DOI":"10.18653\/v1\/2021.findings-emnlp.241"},{"key":"11_CR29","unstructured":"an den Oord, A., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018)"},{"key":"11_CR30","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"issue":"7","key":"11_CR31","doi-asserted-by":"publisher","first-page":"1691","DOI":"10.1007\/s10115-022-01684-7","volume":"64","author":"J Pavlopoulos","year":"2022","unstructured":"Pavlopoulos, J., Kougia, V., Androutsopoulos, I., Papamichail, D.: Diagnostic captioning: a survey. Knowl. Inf. Syst. 64(7), 1691\u20131722 (2022)","journal-title":"Knowl. Inf. Syst."},{"key":"11_CR32","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"issue":"8","key":"11_CR33","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al.: Language models are unsupervised multitask learners. OpenAI Blog 1(8), 9 (2019)","journal-title":"OpenAI Blog"},{"key":"11_CR34","unstructured":"Ramesh, V., Chi, N.A., Rajpurkar, P.: Improving radiology report generation systems by removing hallucinated references to non-existent priors. In: Machine Learning for Health, pp. 456\u2013473. PMLR (2022)"},{"issue":"2","key":"11_CR35","doi-asserted-by":"publisher","first-page":"545","DOI":"10.1378\/chest.10-1302","volume":"141","author":"S Raoof","year":"2012","unstructured":"Raoof, S., Feigin, D., Sung, A., Raoof, S., Irugulpati, L., Rosenow, E.C., III.: Interpretation of plain chest roentgenogram. Chest 141(2), 545\u2013558 (2012)","journal-title":"Chest"},{"key":"11_CR36","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: Advances in Neural Information Processing Systems 28 (2015)"},{"key":"11_CR37","doi-asserted-by":"crossref","unstructured":"Rimmer, A.: Radiologist shortage leaves patient care at risk, warns royal college. BMJ: Br. Med. J. 359 (2017)","DOI":"10.1136\/bmj.j4683"},{"key":"11_CR38","unstructured":"Sanh, V., Debut, L., Chaumond, J., Wolf, T.: DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108 (2019)"},{"key":"11_CR39","doi-asserted-by":"crossref","unstructured":"Serra, F.D., Wang, C., Deligianni, F., Dalton, J., O\u2019Neil, A.Q.: Controllable chest X-ray report generation from longitudinal representations. arXiv preprint arXiv:2310.05881 (2023)","DOI":"10.18653\/v1\/2023.findings-emnlp.325"},{"key":"11_CR40","doi-asserted-by":"crossref","unstructured":"Smit, A., Jain, S., Rajpurkar, P., Pareek, A., Ng, A.Y., Lungren, M.P.: CheXbert: combining automatic labelers and expert annotations for accurate radiology report labeling using BERT. 
arXiv preprint arXiv:2004.09167 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.117"},{"issue":"1","key":"11_CR41","first-page":"25","volume":"18","author":"MS Sorower","year":"2010","unstructured":"Sorower, M.S.: A literature survey on algorithms for multi-label learning. Oregon State Univ. Corvallis 18(1), 25 (2010)","journal-title":"Oregon State Univ. Corvallis"},{"key":"11_CR42","doi-asserted-by":"crossref","unstructured":"Tanida, T., M\u00fcller, P., Kaissis, G., Rueckert, D.: Interactive and explainable region-guided radiology report generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7433\u20137442 (2023)","DOI":"10.1109\/CVPR52729.2023.00718"},{"issue":"3","key":"11_CR43","doi-asserted-by":"publisher","first-page":"504","DOI":"10.1016\/j.jacr.2017.12.026","volume":"15","author":"JH Thrall","year":"2018","unstructured":"Thrall, J.H., et al.: Artificial intelligence and machine learning in radiology: opportunities, challenges, pitfalls, and criteria for success. J. Am. Coll. Radiol. 15(3), 504\u2013508 (2018)","journal-title":"J. Am. Coll. Radiol."},{"key":"11_CR44","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems 30 (2017)"},{"key":"11_CR45","unstructured":"Veli\u010dkovi\u0107, P., Cucurull, G., Casanova, A., Romero, A., Lio, P., Bengio, Y.: Graph attention networks. arXiv preprint arXiv:1710.10903 (2017)"},{"key":"11_CR46","doi-asserted-by":"crossref","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: a neural image caption generator. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3156\u20133164 (2015)","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"11_CR47","unstructured":"Wang, F., Zhou, Y., Wang, S., Vardhanabhuti, V., Yu, L.: Multi-granularity cross-modal alignment for generalized medical visual representation learning. In: Advances in Neural Information Processing Systems 35, pp. 33536\u201333549 (2022)"},{"key":"11_CR48","doi-asserted-by":"publisher","first-page":"563","DOI":"10.1007\/978-3-031-19833-5_33","volume-title":"European Conference on Computer Vision 2022","author":"J Wang","year":"2022","unstructured":"Wang, J., Bhalerao, A., He, Y.: Cross-modal prototype driven network for radiology report generation. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13695, pp. 563\u2013579. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19833-5_33"},{"key":"11_CR49","doi-asserted-by":"crossref","unstructured":"Wang, X., Peng, Y., Lu, L., Lu, Z., Summers, R.M.: TieNet: text-image embedding network for common thorax disease classification and reporting in chest X-rays. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 9049\u20139058 (2018)","DOI":"10.1109\/CVPR.2018.00943"},{"key":"11_CR50","doi-asserted-by":"crossref","unstructured":"Wang, Z., Liu, L., Wang, L., Zhou, L.: METransformer: radiology report generation by transformer with multiple learnable expert tokens. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11558\u201311567 (2023)","DOI":"10.1109\/CVPR52729.2023.01112"},{"key":"11_CR51","doi-asserted-by":"crossref","unstructured":"Wang, Z., Wu, Z., Agarwal, D., Sun, J.: MedCLIP: contrastive learning from unpaired medical images and text. 
arXiv preprint arXiv:2210.10163 (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.256"},{"key":"11_CR52","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: CVT: introducing convolutions to vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 22\u201331 (2021)","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"11_CR53","unstructured":"Wu, J.T., et\u00a0al.: Chest imagenome dataset for clinical reasoning. arXiv preprint arXiv:2108.00316 (2021)"},{"key":"11_CR54","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: International Conference on Machine Learning, pp. 2048\u20132057. PMLR (2015)"},{"key":"11_CR55","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1007\/978-3-030-87199-4_7","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"D You","year":"2021","unstructured":"You, D., Liu, F., Ge, S., Xie, X., Zhang, J., Wu, X.: AlignTransformer: hierarchical alignment of visual regions and disease tags for medical report generation. In: de Bruijne, M., et al. (eds.) MICCAI 2021, Part III. LNCS, vol. 12903, pp. 72\u201382. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87199-4_7"},{"key":"11_CR56","doi-asserted-by":"crossref","unstructured":"You, Q., Jin, H., Wang, Z., Fang, C., Luo, J.: Image captioning with semantic attention. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4651\u20134659 (2016)","DOI":"10.1109\/CVPR.2016.503"},{"key":"11_CR57","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Wang, X., Xu, Z., Yu, Q., Yuille, A., Xu, D.: When radiology report generation meets knowledge graph. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a034, pp. 12910\u201312917 (2020)","DOI":"10.1609\/aaai.v34i07.6989"},{"key":"11_CR58","unstructured":"Zhang, Y., Jiang, H., Miura, Y., Manning, C.D., Langlotz, C.P.: Contrastive learning of medical visual representations from paired images and text. In: Machine Learning for Healthcare Conference, pp. 2\u201325. PMLR (2022)"},{"key":"11_CR59","doi-asserted-by":"crossref","unstructured":"Zhu, Q., Mathai, T.S., Mukherjee, P., Peng, Y., Summers, R.M., Lu, Z.: Utilizing longitudinal chest X-rays and reports to pre-fill radiology reports. 
arXiv preprint arXiv:2306.08749 (2023)","DOI":"10.1007\/978-3-031-43904-9_19"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73001-6_11","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T11:09:16Z","timestamp":1732619356000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73001-6_11"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,27]]},"ISBN":["9783031730009","9783031730016"],"references-count":59,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73001-6_11","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,27]]},"assertion":[{"value":"27 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}