{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T08:25:32Z","timestamp":1730276732034,"version":"3.28.0"},"reference-count":13,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2016,12]]},"DOI":"10.1109\/isspit.2016.7886039","type":"proceedings-article","created":{"date-parts":[[2017,3,28]],"date-time":"2017-03-28T02:53:28Z","timestamp":1490669608000},"page":"223-228","source":"Crossref","is-referenced-by-count":168,"title":["Learning to communicate: Channel auto-encoders, domain specific regularizers, and attention"],"prefix":"10.1109","author":[{"given":"Timothy J.","family":"O'Shea","sequence":"first","affiliation":[]},{"given":"Kiran","family":"Karra","sequence":"additional","affiliation":[]},{"given":"T. Charles","family":"Clancy","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Rmsprop and equilibrated adaptive learning rates for nonconvex optimization","author":"dauphin","year":"2015","journal-title":"arXiv preprint arXiv 1502 01032"},{"key":"ref11","first-page":"2008","article-title":"Spatial transformer networks","author":"jaderberg","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/SPLIM.2016.7528397"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ACSSC.2016.7869126"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1080\/095400996116811"},{"key":"ref3","first-page":"3","article-title":"Autoencoders, minimum description length, and helmholtz free energy","author":"hinton","year":"1994","journal-title":"Advances in neural information processing systems"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390294"},{"article-title":"Modulation roundup: Error rates, noise, and capacity","year":"2008","author":"pillai","key":"ref5"},{"key":"ref8","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv preprint arXiv 1412 6980"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-21735-7_7"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCOM.1979.1094370"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1002\/j.1538-7305.1948.tb01338.x"},{"key":"ref9","first-page":"1929","article-title":"Dropout: A simple way to prevent neural networks from overfitting","volume":"15","author":"srivastava","year":"2014","journal-title":"The Journal of Machine Learning Research"}],"event":{"name":"2016 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)","start":{"date-parts":[[2016,12,12]]},"location":"Limassol, Cyprus","end":{"date-parts":[[2016,12,14]]}},"container-title":["2016 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7879518\/7885993\/07886039.pdf?arnumber=7886039","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2017,12,13]],"date-time":"2017-12-13T20:29:12Z","timestamp":1513196952000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7886039\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2016,12]]},"references-count":13,"URL":"https:\/\/doi.org\/10.1109\/isspit.2016.7886039","relation":{},"subject":[],"published":{"date-parts":[[2016,12]]}}}