{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,8,16]],"date-time":"2024-08-16T06:31:04Z","timestamp":1723789864255},"reference-count":44,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10448257","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"source":"Crossref","is-referenced-by-count":2,"title":["Dynamic-Superb: Towards a Dynamic, Collaborative, and Comprehensive Instruction-Tuning Benchmark For Speech"],"prefix":"10.1109","author":[{"given":"Chien-Yu","family":"Huang","sequence":"first","affiliation":[{"name":"National Taiwan University,Taiwan"}]},{"given":"Ke-Han","family":"Lu","sequence":"additional","affiliation":[{"name":"National Taiwan University,Taiwan"}]},{"given":"Shih-Heng","family":"Wang","sequence":"additional","affiliation":[{"name":"National Taiwan University,Taiwan"}]},{"given":"Chi-Yuan","family":"Hsiao","sequence":"additional","affiliation":[{"name":"National Taiwan University,Taiwan"}]},{"given":"Chun-Yi","family":"Kuan","sequence":"additional","affiliation":[{"name":"National Taiwan University,Taiwan"}]},{"given":"Haibin","family":"Wu","sequence":"additional","affiliation":[{"name":"National Taiwan University,Taiwan"}]},{"given":"Siddhant","family":"Arora","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,USA"}]},{"given":"Kai-Wei","family":"Chang","sequence":"additional","affiliation":[{"name":"National Taiwan University,Taiwan"}]},{"given":"Jiatong","family":"Shi","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,USA"}]},{"given":"Yifan","family":"Peng","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,USA"}]},{"given":"Roshan","family":"Sharma","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,USA"}]},{"given":"Shinji","family":"Watanabe","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,USA"}]},{"given":"Bhiksha","family":"Ramakrishnan","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,USA"}]},{"given":"Shady","family":"Shehata","sequence":"additional","affiliation":[{"name":"Mohamed bin Zayed University of Artificial Intelligence,United Arab Emirates"}]},{"given":"Hung-Yi","family":"Lee","sequence":"additional","affiliation":[{"name":"National Taiwan University,Taiwan"}]}],"member":"263","reference":[{"article-title":"Representation learning with contrastive predictive coding","year":"2018","author":"van den Oord","key":"ref1"},{"key":"ref2","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech 
representations","author":"Baevski","year":"2020","journal-title":"NeurIPS"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3207050"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10610"},{"article-title":"Speechprompt v2: Prompt tuning for speech classification tasks","year":"2023","author":"Chang","key":"ref6"},{"article-title":"Speechgen: Unlocking the generative power of speech language models with prompts","year":"2023","author":"Wu","key":"ref7"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2023.08.012"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.346"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-2032"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10023274"},{"article-title":"Listen, think, and understand","year":"2023","author":"Gong","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-naacl.199"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.172"},{"article-title":"Finetuned language models are zero-shot learners","volume-title":"ICLR","author":"Wei","key":"ref18"},{"article-title":"Lms with a voice: Spoken language modeling beyond speech tokens","year":"2023","author":"Nachmani","key":"ref19"},{"article-title":"Audiopalm: A large language model that can speak and listen","year":"2023","author":"Rubenstein","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i21.30570"},{"article-title":"The zero resource speech benchmark 2021: Metrics and baselines for unsupervised spoken language modeling","volume-title":"NeuRIPS Workshop on Self-Supervised Learning for Speech and Audio Processing","author":"Nguyen","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-1775"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746137"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-556"},{"article-title":"Esb: A benchmark for multi-domain end-to-end speech recognition","year":"2022","author":"Gandhi","key":"ref26"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10023141"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1316"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10023234"},{"article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"NAACL","author":"Devlin","key":"ref30"},{"key":"ref31","article-title":"On generative spoken language modeling from raw audio","author":"Lakhotia","year":"2021","journal-title":"Transactions of the Association for Computational Linguistics"},{"article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"ICML","author":"Radford","key":"ref32"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01457"},{"article-title":"Llama: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref34"},{"article-title":"Llama 2: 
Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref35"},{"journal-title":"Large language model","article-title":"Chatgpt (august 3 version)","year":"2023","key":"ref36"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"article-title":"The lj speech dataset","year":"2017","author":"Ito","key":"ref38"},{"article-title":"CSTR VCTK Corpus: English multi-speaker corpus for CSTR voice cloning toolkit (version 0.92)","year":"2019","author":"Yamagishi","key":"ref39"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-2193"},{"article-title":"Imagebind-llm: Multi-modality instruction tuning","year":"2023","author":"Han","key":"ref41"},{"article-title":"Llama-adapter v2: Parameter-efficient visual instruction model","year":"2023","author":"Gao","key":"ref42"},{"article-title":"Llama-adapter: Efficient fine-tuning of language models with zero-init attention","year":"2023","author":"Zhang","key":"ref43"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1799"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2024,4,14]]},"location":"Seoul, Korea, Republic of","end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10448257.pdf?arnumber=10448257","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,3]],"date-time":"2024-08-03T04:42:46Z","timestamp":1722660166000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10448257\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10448257","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}
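For readers who want to work with a record like this programmatically: below is a minimal sketch, assuming only the public Crossref REST API endpoint `https://api.crossref.org/works/{DOI}` and the fields visible in the record above (`title`, `container-title`, `author`, `reference`, `is-referenced-by-count`). It is illustrative, not part of the deposited metadata, and uses only the Python standard library.

```python
# Fetch a Crossref "work" message and summarize it, using only the fields
# present in the record above. Error handling is deliberately minimal.
import json
import urllib.request

DOI = "10.1109/icassp48485.2024.10448257"

with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    # The API wraps the record as {"status": ..., "message": {...}}.
    work = json.load(resp)["message"]

# "title" and "container-title" are arrays in the Crossref schema.
print("Title:    ", work["title"][0])
print("Venue:    ", work["container-title"][0])
print("Published:", "-".join(map(str, work["published"]["date-parts"][0])))
print("Cited by: ", work["is-referenced-by-count"])

# Authors carry "given"/"family" names plus an affiliation list.
for a in work["author"]:
    affil = a["affiliation"][0]["name"] if a.get("affiliation") else ""
    print(f'  {a["given"]} {a["family"]} ({affil})')

# The reference list mixes two shapes: DOI-matched entries and
# unstructured entries that only carry title/author/year strings.
matched = [r for r in work["reference"] if "DOI" in r]
print(f'{len(matched)} of {work["references-count"]} references are DOI-matched')
```

Note the two reference shapes in the record: entries Crossref could match carry `"DOI"` and `"doi-asserted-by"`, while unmatched ones keep only the publisher-supplied `"article-title"`, `"author"`, and `"year"` strings, so any consumer has to handle both.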