{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T21:37:03Z","timestamp":1730237823862,"version":"3.28.0"},"reference-count":64,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,10,1]]},"DOI":"10.1109\/iccv51070.2023.01792","type":"proceedings-article","created":{"date-parts":[[2024,1,15]],"date-time":"2024-01-15T20:55:59Z","timestamp":1705352159000},"page":"19505-19516","source":"Crossref","is-referenced-by-count":0,"title":["SCOB: Universal Text Understanding via Character-wise Supervised Contrastive Learning with Online Text Rendering for Bridging Domain Gap"],"prefix":"10.1109","author":[{"given":"Daehee","family":"Kim","sequence":"first","affiliation":[{"name":"NAVER Cloud AI"}]},{"given":"Yoonsik","family":"Kim","sequence":"additional","affiliation":[{"name":"NAVER Cloud AI"}]},{"given":"DongHyun","family":"Kim","sequence":"additional","affiliation":[{"name":"NAVER Cloud AI"}]},{"given":"Yumin","family":"Lim","sequence":"additional","affiliation":[{"name":"Seoul National University"}]},{"given":"Geewook","family":"Kim","sequence":"additional","affiliation":[{"name":"NAVER Cloud AI"}]},{"given":"Taeho","family":"Kil","sequence":"additional","affiliation":[{"name":"NAVER Cloud AI"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01505"},{"article-title":"Flamingo: a visual language model for few-shot learning","year":"2022","author":"Alayrac","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00313"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01605"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00439"},{"key":"ref6","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","volume-title":"International conference on machine learning","author":"Chen"},{"article-title":"Pix2seq: A language modeling framework for object detection","year":"2021","author":"Chen","key":"ref7"},{"article-title":"Improved baselines with momentum contrastive learning","year":"2020","author":"Chen","key":"ref8"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2017.157"},{"article-title":"Pillow (pil fork) documentation","year":"2015","author":"Clark","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-25069-9_19"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref14","first-page":"980","article-title":"Image-to-markup generation with coarse-to-fine attention","volume-title":"International Conference on Machine Learning","author":"Deng"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref16","first-page":"21271","article-title":"Bootstrap your own latent-a new approach to self-supervised learning","volume":"33","author":"Grill","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.254"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00380"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2015.7333910"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21322"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548112"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00872"},{"article-title":"Understanding dimensional collapse in contrastive self-supervised learning","volume-title":"International Conference on Learning Representations","author":"Jing","key":"ref24"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58545-7_41"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2015.7333942"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2013.221"},{"key":"ref28","first-page":"4171","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proceedings of NAACL-HLT","author":"Ming-Wei Chang Kenton"},{"key":"ref29","first-page":"18661","article-title":"Supervised contrastive learning","volume":"33","author":"Khosla","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Prestu: Pre-training for scene-text understanding","year":"2022","author":"Kil","key":"ref30"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01461"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19815-1_29"},{"article-title":"Adam: A method for stochastic optimization","year":"2014","author":"Kingma","key":"ref33"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00456"},{"article-title":"Openimages: A public dataset for large-scale multi-label and multi-class image classification","year":"2017","author":"Krasin","key":"ref35"},{"article-title":"Pix2struct: Screenshot parsing as pretraining for visual language understanding","year":"2022","author":"Lee","key":"ref36"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1145\/1148170.1148307"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00560"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00112"},{"article-title":"Unified-io: A unified model for vision, language, and multi-modal tasks","year":"2022","author":"Lu","key":"ref42"},{"key":"ref43","first-page":"1697","article-title":"Infographicvqa","volume-title":"Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision","author":"Mathew"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00225"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00156"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00457"},{"article-title":"Cord: a consolidated receipt dataset for post-ocr parsing","volume-title":"Workshop on Document Intelligence at NeurIPS 2019","author":"Park","key":"ref47"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547942"},{"key":"ref49","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International Conference on Machine Learning","author":"Radford"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00851"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00869"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.230"},{"issue":"86","key":"ref53","first-page":"2579","article-title":"Visualizing data using t-sne","volume":"9","author":"van der Maaten","year":"2008","journal-title":"Journal of Machine Learning Research"},{"key":"ref54","article-title":"Attention is all you need","volume-title":"Advances in Neural Information Processing Systems","volume":"30","author":"Vaswani","year":"2017"},{"article-title":"Git: A generative image-to-text transformer for vision and language","year":"2022","author":"Wang","key":"ref55"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1989.1.2.270"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00393"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403172"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.201"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19815-1_17"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_30"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00864"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58589-1_34"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00166"}],"event":{"name":"2023 IEEE\/CVF International Conference on Computer Vision (ICCV)","start":{"date-parts":[[2023,10,1]]},"location":"Paris, France","end":{"date-parts":[[2023,10,6]]}},"container-title":["2023 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10376473\/10376477\/10377475.pdf?arnumber=10377475","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,18]],"date-time":"2024-01-18T01:37:31Z","timestamp":1705541851000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10377475\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,1]]},"references-count":64,"URL":"https:\/\/doi.org\/10.1109\/iccv51070.2023.01792","relation":{},"subject":[],"published":{"date-parts":[[2023,10,1]]}}}