{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,8]],"date-time":"2024-09-08T15:25:26Z","timestamp":1725809126943},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,12,1]],"date-time":"2019-12-01T00:00:00Z","timestamp":1575158400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,12,1]],"date-time":"2019-12-01T00:00:00Z","timestamp":1575158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,12,1]],"date-time":"2019-12-01T00:00:00Z","timestamp":1575158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,12]]},"DOI":"10.1109\/asru46091.2019.9003913","type":"proceedings-article","created":{"date-parts":[[2020,2,21]],"date-time":"2020-02-21T07:01:33Z","timestamp":1582268493000},"page":"920-927","source":"Crossref","is-referenced-by-count":61,"title":["Recognizing Long-Form Speech Using Streaming End-to-End Models"],"prefix":"10.1109","author":[{"given":"Arun","family":"Narayanan","sequence":"first","affiliation":[]},{"given":"Rohit","family":"Prabhavalkar","sequence":"additional","affiliation":[]},{"given":"Chung-Cheng","family":"Chiu","sequence":"additional","affiliation":[]},{"given":"David","family":"Rybach","sequence":"additional","affiliation":[]},{"given":"Tara N.","family":"Sainath","sequence":"additional","affiliation":[]},{"given":"Trevor","family":"Strohman","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"journal-title":"Adam A method for stochastic optimization","year":"2014","author":"kingma","key":"ref39"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3079856.3080246"},{"journal-title":"Transformer-xl Attentive language models beyond a fixed-length context","year":"2019","author":"dai","key":"ref33"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/0893-6080(88)90007-X"},{"key":"ref31","article-title":"Investigation of Transfer Learning for ASR Using LF-MMI Trained Neural Networks","author":"ghahremani","year":"2017","journal-title":"Proc of ASRU"},{"key":"ref30","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-1510","article-title":"Generation of Large-Scale Simulated Utterances in Virtual Rooms to Train Deep-Neural Networks for Far-Field Speech Recognition in Google Home","author":"kim","year":"2017","journal-title":"Proc of Interspeech"},{"journal-title":"Lingvo a Modular and Scalable Framework for Sequence-to-Sequence Modeling","year":"2019","author":"shen","key":"ref37"},{"key":"ref36","article-title":"Tensor-flow: A System for Large-scale Machine Learning","author":"abadi","year":"2016","journal-title":"Proc of OSDI"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682336"},{"key":"ref34","article-title":"Feature Learning in Deep Neural Networks - Studies on Speech Recognition Tasks","author":"yu","year":"2013","journal-title":"Proc of ICLR"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"ref11","article-title":"A Comparison of Sequence-to-Sequence Models for Speech Recognition","author":"prabhavalkar","year":"2017","journal-title":"Proc of 
Interspeech"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2013.6707758"},{"key":"ref12","article-title":"Exploring Neural Transducers for End-to-end Speech Recognition","author":"battenberg","year":"2017","journal-title":"Proc of ASRU"},{"key":"ref13","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-1296","article-title":"Advances in Joint CTC-Attention Based End-to-End Speech Recognition with a Deep CNN Encoder and RNN-LM","author":"hori","year":"2017","journal-title":"Proc of Interspeech"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1616"},{"journal-title":"SpecAugment A simple data augmentation method for automatic speech recognition","year":"2019","author":"park","key":"ref15"},{"key":"ref16","article-title":"Exploring Architectures, Data and Units for Streaming End-to-End Speech Recognition with RNN-Transducer","author":"rao","year":"2017","journal-title":"Proc of ASRU"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462105"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1341"},{"key":"ref19","article-title":"To-ward Domain-Invariant Speech Recognition via Large Scale Training","author":"narayanan","year":"2018","journal-title":"Proc of SLT"},{"key":"ref4","article-title":"Deep Speech 2: End-to-End Speech Recognition in English and Mandarin","author":"amodei","year":"2016","journal-title":"Proc of ICML"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1142\/S0218488598000094"},{"journal-title":"Deep speech Scaling up end-to-end speech recognition","year":"2014","author":"hannun","key":"ref3"},{"journal-title":"The HARPY speech recognition system","year":"1976","author":"lowerre","key":"ref27"},{"key":"ref6","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-546","article-title":"Direct Acoustics-to-Word Models for English Conversational Speech Recognition","author":"audhkhasi","year":"2017","journal-title":"Proc of Interspeech"},{"key":"ref5","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-1566","article-title":"Neural Speech Recognizer: Acoustic-to-Word LSTM Model for Large Vocabulary Speech Recognition","author":"soltau","year":"2017","journal-title":"Proc of Interspeech"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1475"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638947"},{"journal-title":"Sequence transduction with recurrent neural networks","year":"2012","author":"graves","key":"ref7"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/79.382443"},{"key":"ref9","article-title":"Attention-Based Models for Speech Recognition","author":"chorowski","year":"2015","journal-title":"Proc of NIPS"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2012.2205597"},{"journal-title":"A neural transducer","year":"2015","author":"jaitly","key":"ref20"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003854"},{"key":"ref21","article-title":"Monotonic Chunkwise Attention","author":"chiu","year":"2018","journal-title":"Proc of ICLR"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-264"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2012.6289079"},{"journal-title":"The Last of the Chiefs A Story of the Great Sioux 
War","year":"1909","author":"altsheler","key":"ref41"},{"key":"ref26","article-title":"Sequence to Sequence Learning with Neural Networks","author":"sutskever","year":"2014","journal-title":"Proc of NIPS"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"}],"event":{"name":"2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","start":{"date-parts":[[2019,12,14]]},"location":"SG, Singapore","end":{"date-parts":[[2019,12,18]]}},"container-title":["2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8985378\/9003727\/09003913.pdf?arnumber=9003913","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T14:44:17Z","timestamp":1658155457000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9003913\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,12]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/asru46091.2019.9003913","relation":{},"subject":[],"published":{"date-parts":[[2019,12]]}}}