{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T08:08:17Z","timestamp":1743754097088,"version":"3.37.3"},"reference-count":36,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000781","name":"European Research Council (ERC) Starting Grant BEACON","doi-asserted-by":"publisher","award":["677854"],"id":[{"id":"10.13039\/501100000781","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
Wireless Commun."],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/twc.2020.2974748","type":"journal-article","created":{"date-parts":[[2020,2,26]],"date-time":"2020-02-26T21:23:49Z","timestamp":1582752229000},"page":"3546-3557","source":"Crossref","is-referenced-by-count":445,"title":["Federated Learning Over Wireless Fading Channels"],"prefix":"10.1109","volume":"19","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7343-6628","authenticated-orcid":false,"given":"Mohammad Mohammadi","family":"Amiri","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7725-395X","authenticated-orcid":false,"given":"Deniz","family":"Gunduz","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2010.2043769"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511807213"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/LCOMM.2018.2877316"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2016.2621478"},{"key":"ref36","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv 1412 6980"},{"journal-title":"The MNIST Database of Handwritten Digits","year":"1998","author":"lecun","key":"ref35"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.0909892106"},{"key":"ref10","article-title":"DoReFa-Net: Training low bitwidth convolutional neural networks with low bitwidth gradients","author":"zhou","year":"2016","journal-title":"arXiv 1606 06160 [cs]"},{"key":"ref11","article-title":"ATOMO: Communication-efficient learning via atomic sparsification","author":"wang","year":"2018","journal-title":"arXiv 1806 04090"},{"key":"ref12","article-title":"SignSGD: Compressed optimisation for non-convex problems","author":"bernstein","year":"2018","journal-title":"arXiv 1802 
04434"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ASPDAC.2018.8297378"},{"key":"ref14","first-page":"1","article-title":"Scalable distributed DNN training using commodity GPU cloud computing","author":"strom","year":"2015","journal-title":"Proc INTERSPEECH"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1045"},{"key":"ref16","article-title":"Deep gradient compression: Reducing the communication bandwidth for distributed training","author":"lin","year":"2018","journal-title":"arXiv 1712 01887"},{"key":"ref17","article-title":"MeProp: Sparsified back propagation for accelerated deep learning with reduced overfitting","author":"sun","year":"2017","journal-title":"arXiv 1706 06197"},{"key":"ref18","article-title":"Sparse binary compression: Towards distributed deep learning with minimal communication","author":"sattler","year":"2018","journal-title":"arXiv 1805 08768"},{"key":"ref19","article-title":"SparCML: High-performance sparse communication for machine learning","author":"renggli","year":"2018","journal-title":"arXiv 1802 08021"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/SPAWC.2019.8815402"},{"key":"ref4","first-page":"1","article-title":"Communication-efficient learning of deep networks from decentralized data","author":"mcmahan","year":"2017","journal-title":"Proc AISTATS"},{"key":"ref27","article-title":"Federated learning via over-the-air computation","author":"yang","year":"2018","journal-title":"arXiv 1812 11750"},{"key":"ref3","article-title":"Randomized distributed mean estimation: Accuracy vs communication","author":"kone\u010dn\u00fd","year":"2016","journal-title":"arXiv 1611 07555"},{"key":"ref6","first-page":"1","article-title":"Deep learning with limited numerical precision","author":"gupta","year":"2015","journal-title":"Proc ICML"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/1851275.1851257"},{"key":"ref5","first-page":"1","article-title":"Federated multi-task 
learning","author":"smith","year":"2017","journal-title":"Proc Neural Inf Process Syst (NIPS)"},{"key":"ref8","first-page":"1709","article-title":"QSGD: Communication-efficient SGD via randomized quantization and encoding","author":"alistarh","year":"2017","journal-title":"Proc NIPS"},{"key":"ref7","first-page":"1058","article-title":"1-bit stochastic gradient descent and its application to data-parallel distributed training of speech DNNs","author":"seide","year":"2014","journal-title":"Proc INTERSPEECH"},{"journal-title":"Federated learning Collaborative machine learning without centralized training data","year":"2017","author":"mcmahan","key":"ref2"},{"key":"ref9","article-title":"TernGrad: Ternary gradients to reduce communication in distributed deep learning","author":"wen","year":"2017","journal-title":"arXiv 1705 07878"},{"key":"ref1","article-title":"Federated learning: Strategies for improving communication efficiency","author":"kone\u010dn\u00fd","year":"2016","journal-title":"arXiv 1610 05492"},{"key":"ref20","article-title":"The convergence of sparsified gradient methods","author":"alistarh","year":"2018","journal-title":"arXiv 1809 10505"},{"key":"ref22","article-title":"Local SGD converges fast and communicates little","author":"stich","year":"2018","journal-title":"arXiv 1805 09767"},{"key":"ref21","article-title":"Variance-based gradient compression for efficient distributed deep learning","author":"tsuzuku","year":"2018","journal-title":"arXiv 1802 06058"},{"key":"ref24","article-title":"LAG: Lazily aggregated gradient for communication-efficient distributed learning","author":"chen","year":"2018","journal-title":"arXiv 1805 09965"},{"key":"ref23","article-title":"Don’t use large mini-batches, use local SGD","author":"lin","year":"2018","journal-title":"arXiv 1808 07217"},{"key":"ref26","article-title":"Broadband analog aggregation for low-latency federated edge learning (extended version)","author":"zhu","year":"2018","journal-title":"arXiv 1812 
11494"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ISIT.2019.8849334"}],"container-title":["IEEE Transactions on Wireless Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7693\/9090043\/09014530.pdf?arnumber=9014530","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T16:27:50Z","timestamp":1651076870000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9014530\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":36,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/twc.2020.2974748","relation":{},"ISSN":["1536-1276","1558-2248"],"issn-type":[{"type":"print","value":"1536-1276"},{"type":"electronic","value":"1558-2248"}],"subject":[],"published":{"date-parts":[[2020,5]]}}}