{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T14:35:52Z","timestamp":1730212552485,"version":"3.28.0"},"reference-count":59,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6]]},"DOI":"10.1109\/cvpr52729.2023.00359","type":"proceedings-article","created":{"date-parts":[[2023,8,22]],"date-time":"2023-08-22T17:30:52Z","timestamp":1692725452000},"page":"3687-3697","source":"Crossref","is-referenced-by-count":11,"title":["TinyMIM: An Empirical Study of Distilling MIM Pre-trained Models"],"prefix":"10.1109","author":[{"given":"Sucheng","family":"Ren","sequence":"first","affiliation":[{"name":"Microsoft Research Asia"}]},{"given":"Fangyun","family":"Wei","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia"}]},{"given":"Zheng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia"}]},{"given":"Han","family":"Hu","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia"}]}],"member":"263","reference":[{"journal-title":"Seed Self-supervised distillation for visual representation","year":"2021","author":"fang","key":"ref13"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1145\/3097983.3098135"},{"key":"ref12","article-title":"Corrupted image modeling for self-supervised visual pre-training","author":"fang","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.754"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-021-01453-z"},{"key":"ref59","article-title":"ibot: Image bert pre-training with online tokenizer","author":"zhou","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref14","first-page":"1607","article-title":"Born again neural networks","author":"furlanello","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11636"},{"key":"ref53","doi-asserted-by":"crossref","first-page":"73","DOI":"10.1007\/978-3-031-19830-4_5","article-title":"Factorizing knowledge in neural networks","author":"yang","year":"2022","journal-title":"Computer Vision–ECCV 2022 17th European Conference"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00089"},{"key":"ref11","article-title":"An image is worth 16×16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"2020","journal-title":"Preprint"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20219"},{"key":"ref10","article-title":"Peco: Perceptual codebook for bert pre-training of vision transformers","author":"dong","year":"2021","journal-title":"ArXiv 
Preprint"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01253"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013779"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00201"},{"key":"ref51","article-title":"The modality focusing hypothesis: Towards understanding crossmodal knowledge distillation","author":"xue","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref50","article-title":"On data scaling in masked image modeling","author":"xie","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref46","article-title":"Contrastive learning rivals masked image modeling in fine-tuning via feature distillation","author":"wei","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref45","article-title":"Masked feature prediction for self-supervised visual pre-training","author":"wei","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref48","article-title":"Revealing the dark secrets of masked image modeling","author":"xie","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref47","article-title":"Contrastive learning rivals masked image modeling in fine-tuning via feature distillation","author":"wei","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref42","article-title":"Mobilevitv3: Mobile-friendly vision transformer with simple and effective fusion of local, global and input features","author":"wadekar","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref41","article-title":"Training data-efficient image transformers & distillation through attention","author":"touvron","year":"2020","journal-title":"Preprint"},{"key":"ref44","article-title":"Kdgan: Knowledge distillation with generative adversarial networks","volume":"31","author":"wang","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00943"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00489"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20056-4_7"},{"key":"ref9","first-page":"4171","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2019","journal-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics Human Language Technologies"},{"key":"ref4","article-title":"Context autoencoder for self-supervised representation learning","author":"chen","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00520"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"ref40","article-title":"Siamese image modeling for self-supervised vision representation learning","author":"tao","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01312"},{"key":"ref34","article-title":"Zero-shot text-to-image generation","volume":"abs 2102 
12092","author":"ramesh","year":"2021","journal-title":"ArXiv"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01058"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01627"},{"key":"ref31","first-page":"294","article-title":"Edgevits: Competing light-weight cnns on mobile devices with vision transformers","author":"pan","year":"2022","journal-title":"European Conference on Computer Vision"},{"key":"ref30","article-title":"Mobilevit: Light-weight, general-purpose, and mobile-friendly vision transformer","author":"mehta","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref33","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"radford","year":"2021","journal-title":"ICML"},{"key":"ref32","article-title":"BEiT v2: Masked image modeling with vector-quantized visual tokenizers","author":"peng","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref2","article-title":"BEiT: BERT pre-training of image transformers","author":"bao","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref1","article-title":"Data2vec: A general framework for self-supervised learning in speech, vision and language","author":"baevski","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref39","first-page":"6105","article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","author":"tan","year":"2019","journal-title":"International Conference on Machine Learning"},{"key":"ref38","article-title":"Fitnets: Hints for thin deep nets","author":"romero","year":"2014","journal-title":"ArXiv Preprint"},{"key":"ref24","article-title":"Paraphrasing complex network: Network compression via factor transfer","volume":"31","author":"kim","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref23","article-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications","author":"howard","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref26","article-title":"Efficientformer: Vision transformers at mobilenet speed","author":"li","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref25","first-page":"335","article-title":"Self-supervised knowledge distillation using singular value decomposition","author":"hyun lee","year":"0","journal-title":"Proceedings of the European Conference on Computer Vision (ECCV)"},{"key":"ref20","article-title":"Distilling the knowledge in a neural network","volume":"2","author":"hinton","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00140"},{"key":"ref21","article-title":"Milan: Masked image pretraining on language assisted representation","volume":"abs 2208 6049","author":"hou","year":"2022","journal-title":"ArXiv"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01170"},{"key":"ref27","article-title":"Dataset distillation via factorization","author":"liu","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"}],"event":{"name":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","start":{"date-parts":[[2023,6,17]]},"location":"Vancouver, BC, Canada","end":{"date-parts":[[2023,6,24]]}},"container-title":["2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition 
(CVPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10203037\/10203050\/10204508.pdf?arnumber=10204508","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,11]],"date-time":"2023-09-11T18:02:37Z","timestamp":1694455357000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10204508\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6]]},"references-count":59,"URL":"https:\/\/doi.org\/10.1109\/cvpr52729.2023.00359","relation":{},"subject":[],"published":{"date-parts":[[2023,6]]}}}