{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,12,18]],"date-time":"2024-12-18T05:21:22Z","timestamp":1734499282032,"version":"3.30.2"},"reference-count":47,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100004807","name":"German Research Foundation (DFG)","doi-asserted-by":"publisher","award":["TRR 169"],"id":[{"id":"10.13039\/100004807","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/ijcnn55064.2022.9892053","type":"proceedings-article","created":{"date-parts":[[2022,9,30]],"date-time":"2022-09-30T19:56:04Z","timestamp":1664567764000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["Continuous Phoneme Recognition based on Audio-Visual Modality Fusion"],"prefix":"10.1109","author":[{"given":"Julius","family":"Richter","sequence":"first","affiliation":[{"name":"Universität Hamburg,Signal Processing (SP),Hamburg,Germany"}]},{"given":"Jeanine","family":"Liebold","sequence":"additional","affiliation":[{"name":"Universität Hamburg,Signal Processing (SP),Hamburg,Germany"}]},{"given":"Timo","family":"Gerkmann","sequence":"additional","affiliation":[{"name":"Universität Hamburg,Signal Processing (SP),Hamburg,Germany"}]}],"member":"263","reference":[{"volume-title":"The handbook of speech perception","year":"2008","author":"Pisoni","key":"ref1"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.2200\/S00473ED1V01Y201301SAP011"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1186\/s13634-015-0256-4"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3397271.3401050"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/S0020-0255(03)00163-4"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3066303"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s11633-021-1293-0"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1038\/264746a0"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2017.07.001"},{"key":"ref10","article-title":"Multi modal deep learning","author":"Ngiam","year":"2011","journal-title":"ICML"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2003.817150"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_38"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.5120\/ijca2016909693"},{"key":"ref14","first-page":"39","article-title":"Deep belief networks for phone recognition","volume-title":"Nips workshop on deep learning for speech recognition and related applications","author":"Mohamed","year":"2009"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/BF02943243"},{"key":"ref16","article-title":"First-pass large vocabulary continuous speech recognition using bi-directional recurrent DNNs","author":"Hannun","year":"2014","journal-title":"arXiv preprint"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683713"},{"key":"ref18","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume":"33","author":"Baevski","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1456"},{"key":"ref20","article-title":"NeMo: a toolkit for building AI applications using neural modules","author":"Kuchaiev","year":"2019","journal-title":"arXiv preprint"},{"key":"ref21","first-page":"1","article-title":"Robust phoneme recognition with little data","volume":"74","author":"Shulby","year":"2019","journal-title":"Symposium on Languages, Applications and Technologies (SLATE 2019)"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053266"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1142\/S0218001418560074"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-860"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3107946"},{"volume-title":"Deep learning","year":"2016","author":"Goodfellow","key":"ref26"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2014-293"},{"key":"ref28","article-title":"LipNet: End-to-end sentence-level lipreading","author":"Assael","year":"2016","journal-title":"arXiv preprint"},{"key":"ref29","article-title":"Learning audio-visual speech representation by masked multimodal cluster prediction","volume-title":"International Conference on Learning Representations (ICLR)","author":"Shi","year":"2022"},{"journal-title":"LRS3-TED: a large-scale dataset for visual speech recognition","year":"2018","author":"Afouras","key":"ref30"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ReTIS.2015.7232917"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2889052"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054528"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3242969.3243014"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.23919\/Eusipco47968.2020.9287841"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2020.2980956"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00745"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2015.2407694"},{"volume-title":"Speechreading (Lipreading)","year":"1971","author":"Jeffers","key":"ref40"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2010-774"},{"issue":"146","key":"ref42","first-page":"10","article-title":"Converting video formats with ffmpeg","volume":"2006","author":"Tomar","year":"2006","journal-title":"Linux Journal"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00949"},{"key":"ref44","article-title":"Phoneme-to-viseme mapping for visual speech recognition","author":"Cappelletta","year":"2012","journal-title":"ICPRAM"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511486395.002"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682566"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2017.2783545"}],"event":{"name":"2022 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2022,7,18]]},"location":"Padua, Italy","end":{"date-parts":[[2022,7,23]]}},"container-title":["2022 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9891857\/9889787\/09892053.pdf?arnumber=9892053","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,17]],"date-time":"2024-12-17T06:18:54Z","timestamp":1734416334000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9892053\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/ijcnn55064.2022.9892053","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}