{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T21:38:13Z","timestamp":1730237893140,"version":"3.28.0"},"reference-count":112,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,10,1]]},"DOI":"10.1109\/iccv51070.2023.00487","type":"proceedings-article","created":{"date-parts":[[2024,1,15]],"date-time":"2024-01-15T15:55:59Z","timestamp":1705334159000},"page":"5262-5274","source":"Crossref","is-referenced-by-count":13,"title":["EgoVLPv2: Egocentric Video-Language Pre-training with Fusion in the Backbone"],"prefix":"10.1109","author":[{"given":"Shraman","family":"Pramanick","sequence":"first","affiliation":[{"name":"Johns Hopkins University"}]},{"given":"Yale","family":"Song","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Sayan","family":"Nag","sequence":"additional","affiliation":[{"name":"University of Toronto"}]},{"given":"Kevin Qinghong","family":"Lin","sequence":"additional","affiliation":[{"name":"National University of Singapore"}]},{"given":"Hardik","family":"Shah","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Mike Zheng","family":"Shou","sequence":"additional","affiliation":[{"name":"National University of Singapore"}]},{"given":"Rama","family":"Chellappa","sequence":"additional","affiliation":[{"name":"Johns Hopkins University"}]},{"given":"Pengchuan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Meta AI"}]}],"member":"263","reference":[{"key":"ref1","first-page":"24206","article-title":"Vatt: Transformers for multimodal self-supervised learning from raw video, audio and text","volume":"34","author":"Akbari","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02209"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref5","article-title":"Vlmo: Unified vision-language pre-training with mixture-of-modality-experts","author":"Bao","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref6","first-page":"813","article-title":"Is space-time attention all you need for video understanding?","volume-title":"International Conference on Machine Learning","author":"Bertasius"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00467"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3350571"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093511"},{"key":"ref12","article-title":"Cross-lingual language model pretraining","volume":"32","author":"Conneau","year":"2019","journal-title":"Advances in neural information processing 
systems"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-021-01531-2"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/n19\u20131423"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"International Conference on Learning Representations","author":"Dosovitskiy","key":"ref15"},{"key":"ref16","article-title":"Coarse-to-fine vision-language pre-training with fusion in the backbone","author":"Dou","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01763"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00210"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/2736277.2741112"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.563"},{"key":"ref23","article-title":"Diverse sequential subset selection for supervised video summarization","volume":"27","author":"Gong","year":"2014","journal-title":"Advances in neural information processing systems"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01842"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1168"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3481540"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i1.25178"},{"article-title":"Egotaskqa: Understanding human tasks in egocentric videos","volume-title":"Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track","author":"Jia","key":"ref28"},{"key":"ref29","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"International Conference on Machine Learning","author":"Jia"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3323873.3325040"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6767"},{"key":"ref32","first-page":"25566","article-title":"Reformulating zero-shot action recognition for multi-label actions","volume":"34","author":"Kerrigan","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.83"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00999"},{"article-title":"Parameter efficient multimodal transformers for video representation learning","volume-title":"International Conference on Learning Representations","author":"Lee","key":"ref35"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475431"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"ICML","author":"Li","key":"ref38"},{"key":"ref39","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume":"34","author":"Li","year":"2021","journal-title":"Advances in neural information processing 
systems"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.161"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02214"},{"article-title":"Visualbert: A simple and performant baseline for vision and language","year":"2019","author":"Li","key":"ref42"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1145\/3323873.3325050"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33018658"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02240"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00687"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298625"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01935"},{"key":"ref50","article-title":"Egocentric video-language pretraining","author":"Lin","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00262"},{"article-title":"Roberta: A robustly optimized bert pretraining approach","year":"2019","author":"Liu","key":"ref52"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00320"},{"article-title":"Decoupled weight decay regularization","volume-title":"International Conference on Learning Representations","author":"Loshchilov","key":"ref55"},{"key":"ref56","article-title":"Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume":"32","author":"Lu","year":"2019","journal-title":"Advances in neural information processing systems"},{"article-title":"Univl: A unified video and language pre-training model for multimodal understanding and generation","year":"2020","author":"Luo","key":"ref57"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00990"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00272"},{"article-title":"Imu2clip: Multimodal contrastive learning for imu motion sensors from egocentric videos and text","year":"2022","author":"Moon","key":"ref60"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01082"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-68238-5_47"},{"key":"ref63","first-page":"13988","article-title":"Clip-it! 
language-guided video summarization","volume":"34","author":"Narasimhan","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Representation learning with contrastive predictive coding","year":"2018","author":"van den Oord","key":"ref64"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00274"},{"article-title":"Support-set bottlenecks for video-text representation learning","volume-title":"International Conference on Learning Representations","author":"Patrick","key":"ref66"},{"article-title":"An outlook into the future of egocentric vision","year":"2023","author":"Plizzari","key":"ref67"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10599-4_35"},{"article-title":"Volta: Vision-language transformer with weakly-supervised local-feature alignment","year":"2022","author":"Pramanick","key":"ref69"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00062"},{"key":"ref71","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International conference on machine learning","author":"Radford"},{"issue":"8","key":"ref72","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"issue":"1","key":"ref73","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"The Journal of Machine Learning Research"},{"article-title":"Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter","year":"2019","author":"Sanh","key":"ref74"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00952"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_1"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.229"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00772"},{"article-title":"Charades-ego: A large-scale dataset of paired third and first person videos","year":"2018","author":"Sigurdsson","key":"ref79"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00361"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref82","article-title":"Long-form video-language pre-training with multimodal temporal contrastive learning","author":"Sun","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00439"},{"key":"ref84","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00331"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00638"},{"journal-title":"Transactions of Machine Learning Research","article-title":"Git: A generative image-to-text transformer for vision and 
language","author":"Wang","key":"ref87"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02226"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01838"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00054"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01025"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2985868"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6929"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_19"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123427"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.370"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.544"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00498"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01136"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01857"},{"key":"ref102","article-title":"Xlnet: Generalized autoregressive pretraining for language understanding","volume":"32","author":"Yang","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_30"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_29"},{"key":"ref105","first-page":"23634","article-title":"Merlot: Multimodal neural script knowledge models","volume":"34","author":"Zellers","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01759"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.585"},{"article-title":"Query-conditioned three-player adversarial network for video summarization","volume-title":"British Machine Vision Conference (BMVC)","author":"Zhang","key":"ref108"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01340"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00637"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00877"}],"event":{"name":"2023 IEEE\/CVF International Conference on Computer Vision (ICCV)","start":{"date-parts":[[2023,10,1]]},"location":"Paris, France","end":{"date-parts":[[2023,10,6]]}},"container-title":["2023 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10376473\/10376477\/10378463.pdf?arnumber=10378463","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,17]],"date-time":"2024-01-17T20:43:56Z","timestamp":1705524236000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10378463\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,1]]},"references-count":112,"URL":"https:\/\/doi.org\/10.1109\/iccv51070.2023.00487","relation":{},"subject":[],"published":{"date-parts":[[2023,10,1]]}}}