{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,8]],"date-time":"2024-09-08T15:41:54Z","timestamp":1725810114779},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10448217","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"10756-10760","source":"Crossref","is-referenced-by-count":0,"title":["USM-Lite: Quantization and Sparsity Aware Fine-Tuning for Speech Recognition with Universal Speech Models"],"prefix":"10.1109","author":[{"given":"Shaojin","family":"Ding","sequence":"first","affiliation":[{"name":"Google LLC"}]},{"given":"David","family":"Qiu","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"David","family":"Rim","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"Yanzhang","family":"He","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"Oleg","family":"Rybakov","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"Bo","family":"Li","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"Rohit","family":"Prabhavalkar","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"Weiran","family":"Wang","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"Tara N.","family":"Sainath","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"Zhonglin","family":"Han","sequence":"additional","affiliation":[{"name":"Google LLC"}]},{"given":"Jian","family":"Li","sequence":"additional","affiliation":[{"name":"Google DeepMind"}]},{"given":"Amir","family":"Yazdanbakhsh","sequence":"additional","affiliation":[{"name":"Google DeepMind"}]},{"given":"Shivani","family":"Agrawal","sequence":"additional","affiliation":[{"name":"Google LLC"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3390\/sym11081018"},{"journal-title":"Deep speech: Scaling up end-to-end speech recognition","year":"2014","author":"Hannun","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-24797-2"},{"article-title":"Attention-based models for speech recognition","volume-title":"ICONIP","author":"Chorowski","key":"ref4"},{"article-title":"Speech-transformer: a norecurrence sequence-to-sequence model for speech recognition","volume-title":"ICASSP","author":"Dong","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2846"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682336"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462105"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953075"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1873"},{"key":"ref11","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech 
representations","author":"Baevski","year":"2020","journal-title":"NeurIPS"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"article-title":"Self-supervised learning with random-projection quantizer for speech recognition","volume-title":"ICML","author":"Chiu","key":"ref13"},{"article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"ICML","author":"Radford","key":"ref14"},{"journal-title":"Google usm: Scaling automatic speech recognition beyond 100 languages","year":"2023","author":"Zhang","key":"ref15"},{"journal-title":"Scaling speech technology to 1, 000+ languages","year":"2023","author":"Pratap","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-779"},{"journal-title":"Optimizing speech recognition for the edge","year":"2019","author":"Shangguan","key":"ref18"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403058"},{"key":"ref20","article-title":"Parp: Prune, adjust and re-prune for self-supervised speech recognition","author":"Lai","year":"2021","journal-title":"NeurIPS"},{"article-title":"Audio lottery: Speech recognition made ultra-lightweight, noise-robust, and transferable","volume-title":"ICLR","author":"Ding","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1962"},{"journal-title":"A simplified fully quantized transformer for end-to-end speech recognition","year":"2019","author":"Bie","key":"ref23"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10809"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1012"},{"journal-title":"Learning n: m fine-grained structured sparse neural networks from scratch","year":"2021","author":"Zhou","key":"ref26"},{"journal-title":"Estimating or propagating gradients through stochastic neurons for conditional computation","year":"2013","author":"Bengio","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1213"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11112"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1329"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1285"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00286"},{"key":"ref36","article-title":"Artificial Intelligence at Google: Our Principles"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11034"},{"key":"ref39","article-title":"Attention is all you need","author":"Vaswani","year":"2017","journal-title":"NeurIPS"},{"key":"ref40","article-title":"Practical variational inference for neural networks","author":"Graves","year":"2011","journal-title":"NeurIPS"},{"journal-title":"Rand: Robustness aware norm decay for quantized seq2seq models","year":"2023","author":"Qiu","key":"ref41"},{"journal-title":"Learned step size quantization","year":"2019","author":"Esser","key":"ref42"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2024,4,14]]},"location":"Seoul, Korea, Republic 
of","end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10448217.pdf?arnumber=10448217","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,3]],"date-time":"2024-08-03T04:42:35Z","timestamp":1722660155000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10448217\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10448217","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}