{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T14:38:24Z","timestamp":1730212704297,"version":"3.28.0"},"reference-count":45,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,6]]},"DOI":"10.1109\/cvpr52688.2022.00314","type":"proceedings-article","created":{"date-parts":[[2022,9,27]],"date-time":"2022-09-27T15:56:41Z","timestamp":1664294201000},"page":"3128-3137","source":"Crossref","is-referenced-by-count":1,"title":["Tencent-MVSE: A Large-Scale Benchmark Dataset for Multi-Modal Video Similarity Evaluation"],"prefix":"10.1109","author":[{"given":"Zhaoyang","family":"Zeng","sequence":"first","affiliation":[{"name":"Tencent,QQ Browser Lab"}]},{"given":"Yongsheng","family":"Luo","sequence":"additional","affiliation":[{"name":"Tencent,QQ Browser Lab"}]},{"given":"Zhenhua","family":"Liu","sequence":"additional","affiliation":[{"name":"Tencent,QQ Browser Lab"}]},{"given":"Fengyun","family":"Rao","sequence":"additional","affiliation":[{"name":"Tencent,QQ Browser Lab"}]},{"given":"Dian","family":"Li","sequence":"additional","affiliation":[{"name":"Tencent,QQ Browser Lab"}]},{"given":"Weidong","family":"Guo","sequence":"additional","affiliation":[{"name":"Tencent,QQ Browser Lab"}]},{"given":"Zhen","family":"Wen","sequence":"additional","affiliation":[{"name":"Tencent,QQ Browser Lab"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00468"},{"key":"ref38","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"0","journal-title":"NIPS"},{"key":"ref33","article-title":"Ucf101: A dataset of 101 human actions classes from videos in the wild","author":"soomro","year":"2012","journal-title":"ArXiv Preprint"},{"key":"ref32","article-title":"How2: a large-scale dataset for multimodal language understanding","author":"sanabria","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0987-1"},{"key":"ref30","article-title":"Learning transferable visual models from natural language supervision","author":"radford","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref37","first-page":"6105","article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","author":"tan","year":"0","journal-title":"ICML"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-2034"},{"key":"ref34","article-title":"Vl-bert: Pre-training of generic visual-linguistic representations","author":"su","year":"0","journal-title":"ICLRE"},{"key":"ref10","first-page":"4171","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"0","journal-title":"NAACL"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"ref11","article-title":"An image is worth 16×16 words: Transformers for image 
recognition at scale","author":"dosovitskiy","year":"0","journal-title":"ICLRE"},{"key":"ref12","article-title":"Vse++: Improving visual-semantic embeddings with hard negatives","author":"faghri","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3479218"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01278"},{"key":"ref17","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1016\/j.cviu.2016.10.018","article-title":"The thumos challenge on action recognition for videos “in the wild","volume":"155","author":"idrees","year":"2017","journal-title":"Computer Vision and Image Understanding"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.223"},{"key":"ref19","article-title":"The kinetics human action video dataset","author":"kay","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref28","article-title":"Roberta: A robustly optimized bert pretraining approach","author":"liu","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00399"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref6","first-page":"104","article-title":"Uniter: Universal image-text representation learning","author":"chen","year":"0","journal-title":"ECCV"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00272"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/S17-2001"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2013.340"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.58"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00636"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref1","article-title":"Youtube-8m: A large-scale video classification benchmark","author":"abu-el-haija","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.83"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00684"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref41","article-title":"Star: A benchmark for situated reasoning in real-world videos","author":"wu","year":"0","journal-title":"NeurIPS"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3219856"},{"key":"ref44","article-title":"S3d: single shot multi-span detector via fully 3d convolutional networks","author":"zhang","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.502"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00688"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.161"}],"event":{"name":"2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","start":{"date-parts":[[2022,6,18]]},"location":"New Orleans, LA, 
USA","end":{"date-parts":[[2022,6,24]]}},"container-title":["2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9878378\/9878366\/09879520.pdf?arnumber=9879520","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,14]],"date-time":"2022-10-14T16:53:56Z","timestamp":1665766436000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9879520\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6]]},"references-count":45,"URL":"https:\/\/doi.org\/10.1109\/cvpr52688.2022.00314","relation":{},"subject":[],"published":{"date-parts":[[2022,6]]}}}