{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,23]],"date-time":"2024-09-23T04:31:20Z","timestamp":1727065880321},"reference-count":85,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2023,11,1]],"date-time":"2023-11-01T00:00:00Z","timestamp":1698796800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,11,1]],"date-time":"2023-11-01T00:00:00Z","timestamp":1698796800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,11,1]],"date-time":"2023-11-01T00:00:00Z","timestamp":1698796800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Sea-NExT Joint Lab"},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61932009"],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2023,11,1]]},"DOI":"10.1109\/tpami.2023.3292266","type":"journal-article","created":{"date-parts":[[2023,7,4]],"date-time":"2023-07-04T17:31:42Z","timestamp":1688491902000},"page":"13265-13280","source":"Crossref","is-referenced-by-count":6,"title":["Contrastive Video Question Answering via Video Graph Transformer"],"prefix":"10.1109","volume":"45","author":[{"ORCID":"http:\/\/orcid.org\/0000-0001-5573-6195","authenticated-orcid":false,"given":"Junbin","family":"Xiao","sequence":"first","affiliation":[{"name":"National University of Singapore, Singapore"}]},{"ORCID":"http:\/\/orcid.org\/0000-0003-3400-8943","authenticated-orcid":false,"given":"Pan","family":"Zhou","sequence":"additional","affiliation":[{"name":"Sea AI Lab, Singapore"}]},{"ORCID":"http:\/\/orcid.org\/0000-0001-7418-6141","authenticated-orcid":false,"given":"Angela","family":"Yao","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore"}]},{"ORCID":"http:\/\/orcid.org\/0000-0002-5659-793X","authenticated-orcid":false,"given":"Yicong","family":"Li","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore"}]},{"ORCID":"http:\/\/orcid.org\/0000-0001-5461-3986","authenticated-orcid":false,"given":"Richang","family":"Hong","sequence":"additional","affiliation":[{"name":"Hefei University of Technology, Hefei, China"}]},{"ORCID":"http:\/\/orcid.org\/0000-0001-8906-3777","authenticated-orcid":false,"given":"Shuicheng","family":"Yan","sequence":"additional","affiliation":[{"name":"Sea AI Lab, Singapore"}]},{"ORCID":"http:\/\/orcid.org\/0000-0001-6097-7807","authenticated-orcid":false,"given":"Tat-Seng","family":"Chua","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6767"},{"key":"ref57","first-page":"740","article-title":"Microsoft COCO: Common objects in context","author":"lin","year":"2014","journal-title":"Proc Eur Conf Comput 
Vis"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00688"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20184"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6737"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00272"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00171"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00210"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00990"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.149"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16822"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123427"},{"key":"ref16","first-page":"16877","article-title":"Look before you speak: Visually contextualized utterances","author":"seo","year":"2021","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3173208"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.544"},{"key":"ref51","article-title":"Rethinking multi-modal alignment in video question answering from feature and sample perspectives","author":"xiao","year":"2022"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58539-6_27"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00172"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6766"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/88"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01527"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_3"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475193"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33018658"},{"key":"ref43","article-title":"RoBERTa: A robustly optimized BERT pretraining approach","author":"liu","year":"2019"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.481"},{"key":"ref8","first-page":"6000","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc 31st Int Conf Neural Inf Process Syst"},{"key":"ref7","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","author":"li","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.432"},{"key":"ref4","first-page":"13","article-title":"ViLBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","author":"lu","year":"2019","journal-title":"Proc 33rd Int Conf Neural Inf Process Syst"},{"key":"ref3","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"radford","year":"2021","journal-title":"Proc 38th Int Conf Mach 
Learn"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref5","article-title":"VL-BERT: Pre-training of generic visual-linguistic representations","author":"su","year":"2020","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3356082"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/178"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548035"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01025"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00999"},{"key":"ref35","article-title":"Video swin transformer","author":"liu","year":"2021"},{"key":"ref79","article-title":"VisualBERT: A simple and performant baseline for vision and language","author":"li","year":"2019"},{"key":"ref34","first-page":"813","article-title":"Is space-time attention all you need for video understanding?","author":"bertasius","year":"2021","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref78","first-page":"9112","article-title":"Attention over learned object embeddings enables complex visual reasoning","author":"ding","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00971"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_19"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00293"},{"key":"ref74","article-title":"Semi-supervised classification with graph convolutional networks","author":"kipf","year":"2017","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref33","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"2020","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref77","article-title":"CLEVRER: Collision events for video representation and reasoning","author":"yi","year":"2020","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref32","first-page":"91","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","author":"ren","year":"2015","journal-title":"Proc 28th Int Conf Neural Inf Process Syst"},{"key":"ref76","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref2","article-title":"On the opportunities and risks of foundation models","author":"bommasani","year":"2021"},{"key":"ref1","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2019","journal-title":"Proc Annu Conf North Amer Chapter Assoc Comput Linguistics Hum Lang Technol"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00294"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01251"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01067"},{"key":"ref70","article-title":"Representation learning with contrastive predictive 
coding","author":"oord","year":"2018"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00718"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00636"},{"key":"ref24","article-title":"STAR: A benchmark for situated reasoning in real-world videos","author":"wu","year":"2021","journal-title":"Proc 35th Conf Neural Inf Process Syst Datasets Benchmarks Track (Round 2)"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.265"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00965"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i14.17556"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.02059"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00163"},{"key":"ref20","first-page":"487","article-title":"A joint sequence fusion model for video question answering and retrieval","author":"yu","year":"2018","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref64","first-page":"11960","article-title":"Graph transformer networks","author":"yun","year":"2019","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref63","first-page":"28877","article-title":"Do transformers really perform badly for graph representation?","author":"ying","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3323873.3325056"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i2.16231"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00877"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i1.19922"},{"key":"ref28","first-page":"23634","article-title":"MERLOT: Multimodal neural script knowledge models","author":"zellers","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref27","first-page":"26462","article-title":"Learning from inside: Self-driven Siamese sampling and reasoning for video question answering","author":"yu","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref29","article-title":"VIOLET: End-to-end video-language transformers with masked visual-token modeling","author":"fu","year":"2021"},{"key":"ref60","article-title":"Relational inductive biases, deep learning, and graph networks","author":"battaglia","year":"2018"},{"key":"ref62","article-title":"TCL: Transformer-based dynamic graph modelling via contrastive learning","author":"wang","year":"2021"},{"key":"ref61","first-page":"21618","article-title":"Rethinking graph transformers with spectral attention","author":"kreuzer","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine 
Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10269680\/10172254.pdf?arnumber=10172254","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,4]],"date-time":"2023-10-04T17:41:11Z","timestamp":1696441271000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10172254\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,1]]},"references-count":85,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2023.3292266","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,11,1]]}}}