{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T14:48:59Z","timestamp":1730213339419,"version":"3.28.0"},"reference-count":122,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,16]],"date-time":"2024-06-16T00:00:00Z","timestamp":1718496000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,16]],"date-time":"2024-06-16T00:00:00Z","timestamp":1718496000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,16]]},"DOI":"10.1109\/cvpr52733.2024.01335","type":"proceedings-article","created":{"date-parts":[[2024,9,16]],"date-time":"2024-09-16T17:34:53Z","timestamp":1726508093000},"page":"14076-14088","source":"Crossref","is-referenced-by-count":2,"title":["Jack of All Tasks, Master of Many: Designing General-purpose Coarse-to-Fine Vision-Language Model"],"prefix":"10.1109","author":[{"given":"Shraman","family":"Pramanick","sequence":"first","affiliation":[{"name":"Johns Hopkins University"}]},{"given":"Guangxing","family":"Han","sequence":"additional","affiliation":[{"name":"Meta"}]},{"given":"Rui","family":"Hou","sequence":"additional","affiliation":[{"name":"Meta"}]},{"given":"Sayan","family":"Nag","sequence":"additional","affiliation":[{"name":"University of Toronto"}]},{"given":"Ser-Nam","family":"Lim","sequence":"additional","affiliation":[{"name":"University of Central Florida"}]},{"given":"Nicolas","family":"Ballas","sequence":"additional","affiliation":[{"name":"Meta"}]},{"given":"Qifan","family":"Wang","sequence":"additional","affiliation":[{"name":"Meta"}]},{"given":"Rama","family":"Chellappa","sequence":"additional","affiliation":[{"name":"Johns Hopkins University"}]},{"given":"Amjad","family":"Almahairi","sequence":"additional","affiliation":[{"name":"Meta"}]}],"member":"263","reference":[{"key":"ref1","first-page":"23716","article-title":"Flamingo: a visual language model for few-shot learning","author":"Alayrac","year":"2022","journal-title":"NeurIPS"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2010.5540080"},{"key":"ref4","first-page":"2206","article-title":"Improving language models by retrieving from trillions of tokens","volume-title":"ICML","author":"Borgeaud","year":"2022"},{"key":"ref5","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"NeurIPS"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-20870-7_27"},{"journal-title":"Minigpt-v2: large language model as a unified interface for vision-language multi-task learning","year":"2023","author":"Chen","key":"ref7"},{"journal-title":"Shikra: Unleashing multi-modal llms referential dialogue magic","year":"2023","author":"Chen","key":"ref8"},{"key":"ref9","article-title":"Pix2seq: A language modeling framework for object detection","author":"Chen","year":"2021","journal-title":"ICLR"},{"key":"ref10","first-page":"31333","article-title":"A unified sequence interface for vision tasks","volume":"35","author":"Chen","year":"2022","journal-title":"NeurIPS"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"volume-title":"Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality","year":"2023","author":"Chiang","key":"ref12"},{"journal-title":"Palm: Scaling language modeling with pathways","year":"2022","author":"Chowdhery","key":"ref13"},{"key":"ref14","article-title":"Instructblip: Towards general-purpose vision-language models with instruction tuning","author":"Dai","year":"2023","journal-title":"NeurIPS"},{"key":"ref15","first-page":"32942","article-title":"Coarse-to-fine vision-language pre-training with fusion in the backbone","volume":"35","author":"Dou","year":"2022","journal-title":"NeurIPS"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01763"},{"key":"ref17","first-page":"5547","article-title":"Glam: Efficient scaling of language models with mixture-of-experts","volume-title":"ICML","author":"Du","year":"2022"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.179"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.164"},{"key":"ref20","first-page":"6616","article-title":"Large-scale adversarial training for vision-and-language representation learning","author":"Gan","year":"2020","journal-title":"NeurIPS"},{"journal-title":"Llama-adapter v2: Parameter-efficient visual instruction model","year":"2023","author":"Gao","key":"ref21"},{"journal-title":"Grec: Generalized referring expression comprehension","year":"2023","author":"He","key":"ref22"},{"volume-title":"Image captioning: Transforming objects into words","year":"2019","author":"Herdade","key":"ref23"},{"key":"ref24","first-page":"30016","article-title":"An empirical analysis of compute-optimal large language model training","volume":"35","author":"Hoffmann","year":"2022","journal-title":"NeurIPS"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3295748"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.470"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01278"},{"key":"ref28","first-page":"6700","article-title":"Gqa: A new dataset for real-world visual reasoning and compositional question answering","year":"2019","journal-title":"CVPR"},{"journal-title":"Opt-iml: Scaling language model instruction meta learning through the lens of generalization","year":"2022","author":"Iyer","key":"ref29"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2016.2576283"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.413"},{"journal-title":"From clip to dino: Visual encoders shout in multi-modal large language models","year":"2023","author":"Jiang","key":"ref32"},{"journal-title":"From clip to dino: Visual encoders shout in multi-modal large language models","year":"2023","author":"Jiang","key":"ref33"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00973"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00180"},{"key":"ref36","first-page":"2611","article-title":"The hateful memes challenge: Detecting hate speech in multimodal memes","author":"Kiela","year":"2020","journal-title":"NeurIPS"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"journal-title":"Lisa: Reasoning segmentation via large language model","year":"2023","author":"Lai","key":"ref38"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00861"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3212906"},{"journal-title":"Otter: A multi-modal model with in-context instruction tuning","year":"2023","author":"Li","key":"ref41"},{"key":"ref42","article-title":"Llava-med: Training a large language-and-vision assistant for biomedicine in one day","author":"Li","year":"2023","journal-title":"NeurIPS"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6795"},{"key":"ref44","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","author":"Li","year":"2021","journal-title":"NeurIPS"},{"key":"ref45","article-title":"BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models","author":"Li","year":"2023","journal-title":"ICML"},{"journal-title":"Visualbert: A simple and performant baseline for vision and language","year":"2019","author":"Li","key":"ref46"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01069"},{"key":"ref48","first-page":"19652","article-title":"Referring transformer: A one-step approach to multi-task visual grounding","volume":"34","author":"Li","year":"2021","journal-title":"NeurIPS"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-20893-6_40"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02240"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00262"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref55","article-title":"Instance-specific feature propagation for referring segmentation","author":"Liu","year":"2022","journal-title":"IEEE TMM"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02259"},{"key":"ref57","article-title":"Visual instruction tuning","author":"Liu","year":"2023","journal-title":"NeurIPS"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01789"},{"journal-title":"Internchat: Solving vision-centric tasks by interacting with chatbots beyond language","year":"2023","author":"Liu","key":"ref59"},{"key":"ref60","article-title":"Sgdr: Stochastic gradient descent with warm restarts","author":"Loshchilov","year":"2017","journal-title":"ICLR"},{"key":"ref61","article-title":"Decoupled weight decay regularization","author":"Loshchilov","year":"2019","journal-title":"ICLR"},{"key":"ref62","article-title":"Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume":"32","author":"Lu","year":"2019","journal-title":"NeurIPS"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01045"},{"key":"ref64","article-title":"Unified-io: A unified model for vision, language, and multi-modal tasks","author":"Lu","year":"2022","journal-title":"ICLR"},{"key":"ref65","article-title":"Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning","author":"Lu","year":"2021","journal-title":"NeurIPS Datasets and Benchmarks Track"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3414006"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01005"},{"journal-title":"Point and ask: Incorporating pointing into visual question answering","year":"2020","author":"Mani","key":"ref68"},{"journal-title":"Clipcap: Clip prefix for image captioning","year":"2021","author":"Mokady","key":"ref69"},{"journal-title":"Gpt-4 technical report","year":"2023","key":"ref70"},{"journal-title":"openai","article-title":"Chatgpt: Optimizing language models for dialogue","year":"2022","key":"ref71"},{"journal-title":"The refinedweb dataset for falcon llm: outperforming curated corpora with web data, and web data only","year":"2023","author":"Penedo","key":"ref72"},{"journal-title":"Kosmos-2: Grounding multimodal large language models to the world","year":"2023","author":"Peng","key":"ref73"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.303"},{"key":"ref75","article-title":"Volta: Vision-language transformer with weakly-supervised local-feature alignment","author":"Pramanick","year":"2023","journal-title":"TMLR"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00487"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.81"},{"key":"ref78","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"ICML","author":"Radford","year":"2021"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.131"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2006.91"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2013.253"},{"journal-title":"Laion-400m: Open dataset of clip-filtered 400 million image-text pairs","year":"2021","author":"Schuhmann","key":"ref82"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00851"},{"key":"ref84","article-title":"Vl-bert: Pre-training of generic visual-linguistic representations","author":"Su","year":"2019","journal-title":"ICLR"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-2034"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1644"},{"journal-title":"Eva-clip: Improved training techniques for clip at scale","year":"2023","author":"Sun","key":"ref87"},{"volume-title":"Galactica: A large language model for science","year":"2022","author":"Taylor","key":"ref88"},{"journal-title":"Llama: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref89"},{"journal-title":"Llama 2: Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref90"},{"key":"ref91","first-page":"200","article-title":"Multimodal few-shot learning with frozen language models","volume":"34","author":"Tsimpoukelli","year":"2021","journal-title":"NeurIPS"},{"key":"ref92","article-title":"Multitask prompted training enables zero-shot task generalization","author":"Victor","year":"2022","journal-title":"ICLR"},{"key":"ref93","first-page":"23318","article-title":"Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework","volume-title":"ICML","author":"Wang","year":"2022"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01838"},{"key":"ref95","article-title":"Visionllm: Large language model is also an open-ended decoder for vision-centric tasks","author":"Wang","year":"2023","journal-title":"NeurIPS"},{"journal-title":"Cogvlm: Visual expert for pretrained language models","year":"2023","author":"Wang","key":"ref96"},{"journal-title":"The all-seeing project: Towards panoptic visual recognition and understanding of the open world","year":"2023","author":"Wang","key":"ref97"},{"key":"ref98","first-page":"8483","article-title":"Language models with image descriptors are strong few-shot video-language learners","volume":"35","author":"Wang","year":"2022","journal-title":"NeurIPS"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01139"},{"key":"ref100","article-title":"Finetuned language models are zero-shot learners","author":"Wei","year":"2021","journal-title":"ICLR"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2005.171"},{"journal-title":"Bloom: A 176b-parameter open-access multi-lingual language model","year":"2022","author":"Workshop","key":"ref102"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00248"},{"key":"ref104","first-page":"124","article-title":"Zero-shot video question answering via frozen bidirectional language models","volume":"35","author":"Yang","year":"2022","journal-title":"NeurIPS"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_30"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20215"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01762"},{"key":"ref108","first-page":"9","author":"Yang","year":"2023","journal-title":"The dawn of lmms: Preliminary explorations with gpt-4v (ision)"},{"journal-title":"Mm-react: Prompting chatgpt for multimodal reasoning and action","year":"2023","author":"Yang","key":"ref109"},{"journal-title":"mplug-owl: Modularization empowers large language models with multimodality","year":"2023","author":"Ye","key":"ref110"},{"key":"ref111","article-title":"Ferret: Refer and ground anything anywhere at any granularity","author":"You","year":"2024","journal-title":"ICLR"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00142"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00688"},{"key":"ref114","article-title":"Glm-130b: An open bilingual pre-trained model","author":"Zeng","year":"2022","journal-title":"ICLR"},{"key":"ref115","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3087401"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6977"},{"journal-title":"Opt: Open pre-trained transformer language models","year":"2022","author":"Zhang","key":"ref117"},{"journal-title":"Gpt4roi: Instruction tuning large language model on region-of-interest","year":"2023","author":"Zhang","key":"ref118"},{"journal-title":"Bubogpt: Enabling visual grounding in multi-modal llms","year":"2023","author":"Zhao","key":"ref119"},{"key":"ref120","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_35"},{"key":"ref121","article-title":"Minigpt-4: Enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2024","journal-title":"ICLR"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.540"}],"event":{"name":"2024 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","start":{"date-parts":[[2024,6,16]]},"location":"Seattle, WA, USA","end":{"date-parts":[[2024,6,22]]}},"container-title":["2024 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10654794\/10654797\/10655433.pdf?arnumber=10655433","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,20]],"date-time":"2024-09-20T05:28:31Z","timestamp":1726810111000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10655433\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,16]]},"references-count":122,"URL":"https:\/\/doi.org\/10.1109\/cvpr52733.2024.01335","relation":{},"subject":[],"published":{"date-parts":[[2024,6,16]]}}}