{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T22:10:12Z","timestamp":1732745412218,"version":"3.29.0"},"publisher-location":"Cham","reference-count":22,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031705328"},{"type":"electronic","value":"9783031705335"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-70533-5_7","type":"book-chapter","created":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T05:02:25Z","timestamp":1725685345000},"page":"97-116","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["KVP10k : A Comprehensive Dataset for\u00a0Key-Value Pair Extraction in\u00a0Business Documents"],"prefix":"10.1007","author":[{"given":"Oshri","family":"Naparstek","sequence":"first","affiliation":[]},{"given":"Ophir","family":"Azulai","sequence":"additional","affiliation":[]},{"given":"Inbar","family":"Shapira","sequence":"additional","affiliation":[]},{"given":"Elad","family":"Amrani","sequence":"additional","affiliation":[]},{"given":"Yevgeny","family":"Yaroker","sequence":"additional","affiliation":[]},{"given":"Yevgeny","family":"Burshtein","sequence":"additional","affiliation":[]},{"given":"Roi","family":"Pony","sequence":"additional","affiliation":[]},{"given":"Nadav","family":"Rubinstein","sequence":"additional","affiliation":[]},{"given":"Foad Abo","family":"Dahood","sequence":"additional","affiliation":[]},{"given":"Orit","family":"Prince","sequence":"additional","affiliation":[]},{"given":"Idan","family":"Friedman","sequence":"additional","affiliation":[]},{"given":"Christoph","family":"Auer","sequence":"additional","affiliation":[]},{"given":"Nikolaos","family":"Livathinos","sequence":"additional","affiliation":[]},{"given":"Maksym","family":"Lysak","sequence":"additional","affiliation":[]},{"given":"Ahmed","family":"Nassar","sequence":"additional","affiliation":[]},{"given":"Peter","family":"Staar","sequence":"additional","affiliation":[]},{"given":"Udi","family":"Barzelay","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,8]]},"reference":[{"key":"7_CR1","unstructured":"Beltagy,I., Peters, M.E., Cohan, A.: Longformer: the longdocument transformer. arXiv preprint arXiv:2004.05150 (2020)"},{"key":"7_CR2","doi-asserted-by":"crossref","unstructured":"Ding, Y., et al.: Form-NLU: dataset for the form natural language understanding. In: Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2807\u20132816 (2023)","DOI":"10.1145\/3539618.3591886"},{"key":"7_CR3","unstructured":"Hong, T., et al.: Bros: A pre-trained language model for understanding texts in document. (2020)"},{"key":"7_CR4","doi-asserted-by":"crossref","unstructured":"Huang, Y., et al.: Layoutlmv3: Pre-training for document AI with unified text and image masking. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 4083\u20134091 (2022)","DOI":"10.1145\/3503161.3548112"},{"key":"7_CR5","doi-asserted-by":"crossref","unstructured":"Huang, Z., et al.: Icdar2019 competition on scanned receipt OCR and information extraction. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). IEEE, pp. 1516\u20131520 (2019)","DOI":"10.1109\/ICDAR.2019.00244"},{"key":"7_CR6","doi-asserted-by":"crossref","unstructured":"Hwang, W., et al.: Spatial dependency parsing for semi-structured document information extraction. arXiv preprint arXiv:2005.00642 (2020)","DOI":"10.18653\/v1\/2021.findings-acl.28"},{"key":"7_CR7","doi-asserted-by":"crossref","unstructured":"Jaume, G., Ekenel, H.K., Thiran, J.P.: Funsd: a dataset for form understanding in noisy scanned documents. In: 2019 International Conference on Document Analysis and Recognition Workshops (ICDARW), Vol. 2, pp. 1\u20136. IEEE (2019)","DOI":"10.1109\/ICDARW.2019.10029"},{"key":"7_CR8","unstructured":"Jiang, A.Q., et al.: Mistral 7B. arXiv preprint arXiv:2310.06825 (2023)"},{"key":"7_CR9","doi-asserted-by":"crossref","unstructured":"Lee, C.Y., et al.: FormNetV2: multimodal Graph Contrastive Learning for Form Document Information Extraction. arXiv preprint arXiv:2305.02549 (2023)","DOI":"10.18653\/v1\/2023.acl-long.501"},{"key":"7_CR10","unstructured":"Liu, Y., et al.: RoBERTa: a robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692 (2019)"},{"key":"7_CR11","doi-asserted-by":"crossref","unstructured":"Mathew, M., Karatzas, D., Jawahar, C.V.: Docvqa: a dataset for VQA on document images. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 2200\u20132209(2021)","DOI":"10.1109\/WACV48630.2021.00225"},{"key":"7_CR12","doi-asserted-by":"crossref","unstructured":"Mathur, P., et al.: LayerDoc: layer-wise extraction of spatial hierarchical structure in visually-rich documents. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 3610\u20133620 (2023)","DOI":"10.1109\/WACV56688.2023.00360"},{"key":"7_CR13","unstructured":"Naparstek, O., et al.: BusiNet-a light and fast text detection network for business documents. I arXiv preprint arXiv:2207.01220 (2022)"},{"key":"7_CR14","unstructured":"Park, S., et al.: CORD: a consolidated receipt dataset for post- OCR parsing. In: Workshop on Document Intelligence at NeurIPS 2019 (2019)"},{"key":"7_CR15","doi-asserted-by":"crossref","unstructured":"Perot, V., et al.: LMDX: Language Model-based Document Information Extraction and Localization. arXiv preprint arXiv:2309.10952 (2023)","DOI":"10.18653\/v1\/2024.findings-acl.899"},{"key":"7_CR16","doi-asserted-by":"crossref","unstructured":"Smith, R.: An overview of the Tesseract OCR engine. In: Ninth International Conference on Document Analysis and Recognition (ICDAR 2007), Vol. 2, pp. 629\u2013633. IEEE (2007)","DOI":"10.1109\/ICDAR.2007.4376991"},{"key":"7_CR17","doi-asserted-by":"publisher","unstructured":"Stanis\u0142awek, T., et al.: Kleister: key information extraction datasets involving long documents with complex layouts. In: International Conference on Document Analysis and Recognition, pp. 564\u2013579. Springer (2021). https:\/\/doi.org\/10.1007\/978-3-030-86549-8_36","DOI":"10.1007\/978-3-030-86549-8_36"},{"key":"7_CR18","doi-asserted-by":"crossref","unstructured":"Wang, J., et al.: Towards robust visual information extraction in real world: new dataset and novel solution. In: Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 35 no. 4 , pp. 2738\u20132745 (2021)","DOI":"10.1609\/aaai.v35i4.16378"},{"key":"7_CR19","doi-asserted-by":"crossref","unstructured":"Wang, Z., et al.: DocStruct: a multimodal method to extract hierarchy structure in document for general form understanding. arXiv preprint arXiv:2010.11685 (2020)","DOI":"10.18653\/v1\/2020.findings-emnlp.80"},{"key":"7_CR20","doi-asserted-by":"crossref","unstructured":"Wang, Z., et al.: VRDU: a benchmark for visually-rich document understanding. In: Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pp. 5184\u20135193 (2023)","DOI":"10.1145\/3580305.3599929"},{"key":"7_CR21","doi-asserted-by":"crossref","unstructured":"Xu, Y., et al.: XFUND: a benchmark dataset for multilingual visually rich form understanding. In: Findings of the Association for Computational Linguistics: ACL 2022, pp. 3214\u20133224 (2022)","DOI":"10.18653\/v1\/2022.findings-acl.253"},{"key":"7_CR22","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: Modeling entities as semantic points for visual information extraction in the wild. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15358\u201315367 (2023)","DOI":"10.1109\/CVPR52729.2023.01474"}],"container-title":["Lecture Notes in Computer Science","Document Analysis and Recognition - ICDAR 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-70533-5_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T21:45:18Z","timestamp":1732743918000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-70533-5_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031705328","9783031705335"],"references-count":22,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-70533-5_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"8 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICDAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Document Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Athens","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30 August 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 September 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icdar2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icdar2024.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}