{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,22]],"date-time":"2025-03-22T11:50:46Z","timestamp":1742644246243,"version":"3.28.0"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747320","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T15:50:34Z","timestamp":1651074634000},"page":"8072-8076","source":"Crossref","is-referenced-by-count":15,"title":["End-to-End Speech Summarization Using Restricted Self-Attention"],"prefix":"10.1109","author":[{"given":"Roshan","family":"Sharma","sequence":"first","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA"}]},{"given":"Shruti","family":"Palaskar","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA"}]},{"given":"Alan W","family":"Black","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA"}]},{"given":"Florian","family":"Metze","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA"}]}],"member":"263","reference":[{"year":"2020","author":"gulati","journal-title":"Conformer Convolution-augmented transformer for speech recognition","key":"ref30"},{"year":"0","author":"kitaev","article-title":"Reformer: The efficient transformer","key":"ref10"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.18653\/v1\/2020.emnlp-main.19"},{"year":"2020","author":"wang","article-title":"Linformer: Self-attention with linear complexity","key":"ref12"},{"key":"ref13","first-page":"17283","article-title":"Big bird: Transformers for longer sequences","volume":"33","author":"zaheer","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref14","article-title":"Long-former: The long-document transformer","author":"beltagy","year":"2020","journal-title":"CoRR"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.21437\/Interspeech.2019-1943"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.21437\/Interspeech.2020-2928"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.21437\/Interspeech.2021-1643"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1109\/ICASSP39728.2021.9414928"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.18653\/v1\/2020.acl-main.703"},{"doi-asserted-by":"publisher","key":"ref28","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.18653\/v1\/2020.findings-emnlp.19"},{"doi-asserted-by":"publisher","key":"ref27","DOI":"10.21437\/Interspeech.2019-2680"},{"year":"2021","author":"yu","article-title":"Vision guided generative pretrained language models for multi-modal abstractive summarization","key":"ref3"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.21437\/Interspeech.2020-1683"},{"doi-asserted-by":"publisher","key":"ref29","DOI":"10.1109\/ASRU51503.2021.9687977"},{"year":"2020","author":"rezazadegan","article-title":"Automatic speech summarisation: 
A scoping review","key":"ref5"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.18653\/v1\/P19-1285"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.21437\/Interspeech.2021-1923"},{"doi-asserted-by":"publisher","key":"ref2","DOI":"10.18653\/v1\/P19-1659"},{"year":"0","author":"rae","article-title":"Compressive transformers for long-range sequence modelling","key":"ref9"},{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1109\/2.881692"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1109\/ICASSP39728.2021.9415001"},{"key":"ref22","first-page":"74","article-title":"ROUGE: A package for automatic evaluation of summaries","author":"lin","year":"2004","journal-title":"Text Summarization Branches Out Barcelona Spain"},{"year":"2018","author":"sanabria","article-title":"How2: a large-scale dataset for multimodal language understanding","key":"ref21"},{"year":"0","author":"zhang","article-title":"Bertscore: Evaluating text generation with BERT","key":"ref24"},{"doi-asserted-by":"publisher","key":"ref23","DOI":"10.3115\/v1\/W14-3348"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.1109\/ICASSP.2017.7953075"},{"doi-asserted-by":"publisher","key":"ref25","DOI":"10.21437\/Interspeech.2018-1456"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2022,5,23]]},"location":"Singapore, Singapore","end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747320.pdf?arnumber=9747320","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,22]],"date-time":"2022-08-22T16:13:46Z","timestamp":1661184826000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747320\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747320","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}