{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T14:41:38Z","timestamp":1730212898714,"version":"3.28.0"},"reference-count":51,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6]]},"DOI":"10.1109\/cvpr52729.2023.01832","type":"proceedings-article","created":{"date-parts":[[2023,8,22]],"date-time":"2023-08-22T17:30:52Z","timestamp":1692725452000},"page":"19113-19122","source":"Crossref","is-referenced-by-count":122,"title":["MaPLe: Multi-modal Prompt Learning"],"prefix":"10.1109","author":[{"given":"Muhammad Uzair","family":"Khattak","sequence":"first","affiliation":[{"name":"Mohamed bin Zayed University of AI"}]},{"given":"Hanoona","family":"Rasheed","sequence":"additional","affiliation":[{"name":"Mohamed bin Zayed University of AI"}]},{"given":"Muhammad","family":"Maaz","sequence":"additional","affiliation":[{"name":"Mohamed bin Zayed University of AI"}]},{"given":"Salman","family":"Khan","sequence":"additional","affiliation":[{"name":"Mohamed bin Zayed University of AI"}]},{"given":"Fahad Shahbaz","family":"Khan","sequence":"additional","affiliation":[{"name":"Mohamed bin Zayed University of AI"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01501"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00823"},{"key":"ref15","first-page":"4904","article-title":"Scaling up visual and vision-language 
representation learning with noisy text supervision","author":"jia","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref14","article-title":"Unsupervised prompt learning for vision-language models","author":"huang","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/JSTARS.2019.2918242"},{"key":"ref10","article-title":"Open-vocabulary object detection via vision and language knowledge distillation","author":"gu","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref17","article-title":"A good prompt is worth millions of parameters? low-resource prompt-based learning for vision-language models","author":"jin","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref16","article-title":"Visual prompt tuning","author":"jia","year":"0","journal-title":"The European Conference on Computer Vision"},{"journal-title":"How to adapt your large-scale vision-and-language model","year":"2022","author":"kim","key":"ref19"},{"key":"ref18","article-title":"Prompting visual-language models for efficient video understanding","author":"ju","year":"0","journal-title":"The European Conference on Computer Vision"},{"key":"ref51","article-title":"Prompt-aligned gradient for prompt tuning","author":"zhu","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref50","doi-asserted-by":"crossref","DOI":"10.1023\/A:1026531017760","article-title":"Detecting twenty-thousand classes using image-level supervision","author":"zhou","year":"0","journal-title":"The European Conference on Computer Vision"},{"key":"ref46","article-title":"Tip-adapter: Training-free clip-adapter for better vision-language modeling","author":"zhang","year":"0","journal-title":"The European Conference on Computer 
Vision"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01759"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"ref47","article-title":"Neural prompt search","author":"zhang","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref42","article-title":"Cpt: Colorful prompt tuning for pretrained vision-language models","author":"yao","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref41","article-title":"Filip: Fine-grained interactive language-image pretraining","author":"yao","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref44","article-title":"Open-vocabulary detr with conditional matching","author":"zang","year":"0","journal-title":"The European Conference on Computer Vision"},{"key":"ref43","article-title":"Florence: A new foundation model for computer vision","author":"yuan","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"ref8","article-title":"Prompt-det: Towards open-vocabulary detection using uncurated images","author":"feng","year":"0","journal-title":"The European Conference on Computer Vision"},{"key":"ref7","first-page":"178","article-title":"Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories","author":"fei-fei","year":"0","journal-title":"2004 Conference on Computer Vision and Pattern Recognition Workshop"},{"key":"ref9","article-title":"Clip-adapter: Better vision-language models with feature adapters","author":"gao","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.461"},{"key":"ref6","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"0","journal-title":"International 
Conference on Learning Representations"},{"key":"ref5","first-page":"11583","article-title":"Decoupling zero-shot semantic segmentation","author":"ding","year":"0","journal-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2010.5539970"},{"key":"ref35","first-page":"5389","article-title":"Do imagenet classifiers generalize to imagenet?","author":"recht","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref34","article-title":"Bridging the gap between object and image-level representations for open-vocabulary detection","author":"rasheed","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref37","article-title":"Learning robust global representations by penalizing local predictive power","volume":"32","author":"wang","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref36","article-title":"Ucf101: A dataset of 101 human actions classes from videos in the wild","author":"soomro","year":"2012","journal-title":"ArXiv Preprint"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6248092"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01755"},{"key":"ref32","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"radford","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref2","first-page":"446","article-title":"Food-101 - mining discriminative components with random forests","author":"bossard","year":"0","journal-title":"The European Conference on Computer Vision"},{"key":"ref1","article-title":"Visual prompting: Modifying pixel space to adapt pretrained models","author":"bahng","year":"2022","journal-title":"ArXiv 
Preprint"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00024"},{"key":"ref38","article-title":"Dualprompt: Complementary prompting for rehearsal-free continual learning","author":"wang","year":"0","journal-title":"The European Conference on Computer Vision"},{"key":"ref24","article-title":"P-tuning v2: Prompt tuning can be comparable to fine-tuning universally across scales and tasks","author":"liu","year":"0","journal-title":"Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00695"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00514"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2013.77"},{"key":"ref22","article-title":"Language-driven semantic segmentation","author":"li","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref28","article-title":"Fine-grained visual classification of aircraft","author":"maji","year":"2013","journal-title":"ArXiv Preprint"},{"key":"ref27","article-title":"Class-agnostic object detection with multimodal transformer","author":"maaz","year":"0","journal-title":"The European Conference on Computer Vision"},{"journal-title":"Test-time prompt tuning for zero-shot generalization in vision-language models","year":"2022","author":"manli","key":"ref29"}],"event":{"name":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","start":{"date-parts":[[2023,6,17]]},"location":"Vancouver, BC, Canada","end":{"date-parts":[[2023,6,24]]}},"container-title":["2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition 
(CVPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10203037\/10203050\/10203359.pdf?arnumber=10203359","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,11]],"date-time":"2023-09-11T17:50:38Z","timestamp":1694454638000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10203359\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6]]},"references-count":51,"URL":"https:\/\/doi.org\/10.1109\/cvpr52729.2023.01832","relation":{},"subject":[],"published":{"date-parts":[[2023,6]]}}}