{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,8]],"date-time":"2024-09-08T11:02:13Z","timestamp":1725793333688},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,5]]},"DOI":"10.1109\/icassp.2019.8682650","type":"proceedings-article","created":{"date-parts":[[2019,4,17]],"date-time":"2019-04-17T20:01:56Z","timestamp":1555531316000},"page":"7105-7109","source":"Crossref","is-referenced-by-count":10,"title":["Stream Attention-based Multi-array End-to-end Speech Recognition"],"prefix":"10.1109","author":[{"given":"Xiaofei","family":"Wang","sequence":"first","affiliation":[]},{"given":"Ruizhi","family":"Li","sequence":"additional","affiliation":[]},{"given":"Sri Harish","family":"Mallidi","sequence":"additional","affiliation":[]},{"given":"Takaaki","family":"Hori","sequence":"additional","affiliation":[]},{"given":"Shinji","family":"Watanabe","sequence":"additional","affiliation":[]},{"given":"Hynek","family":"Hermansky","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639693"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"ref11","first-page":"577","article-title":"Attention-based models for speech recognition","author":"chorowski","year":"2015","journal-title":"NIPS 2015"},{"key":"ref12","first-page":"1764","article-title":"Towards end-to-end speech recognition with recurrent neural networks","author":"graves","year":"2014","journal-title":"ICML 2014"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2015.7404790"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953075"},{"key":"ref15","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-1296","article-title":"Advances in joint CTC-attention based end-to-end speech recognition with a deep CNN encoder and RNN-LM","author":"hori","year":"2017","journal-title":"Interspeech 2017"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2017.2763455"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2017.2764276"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1301"},{"article-title":"Multichannel end-to-end speech recognition","year":"2017","author":"ochiai","key":"ref19"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2007.902460"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/CHiME.2018-3"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-731"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1768"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.1997.659110"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1456"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/CHiME.2018-2"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/CHiME.2018-5"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1037"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/IWAENC.2016.7602888"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2015.7404806"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2016.11.005"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1536"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N16-1174"},{"article-title":"Multi-encoder multi-resolution framework for end-to-end speech recognition","year":"2018","author":"li","key":"ref21"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-2031"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.450"},{"key":"ref26","first-page":"28","article-title":"The ami meeting corpus: A pre-announcement","author":"carletta","year":"2005","journal-title":"5th International Workshop on Machine Learning for Multimodal Interaction"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639655"}],"event":{"name":"ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2019,5,12]]},"location":"Brighton, United Kingdom","end":{"date-parts":[[2019,5,17]]}},"container-title":["ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8671773\/8682151\/08682650.pdf?arnumber=8682650","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,15]],"date-time":"2022-07-15T03:21:35Z","timestamp":1657855295000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8682650\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,5]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/icassp.2019.8682650","relation":{},"subject":[],"published":{"date-parts":[[2019,5]]}}}