{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,23]],"date-time":"2025-04-23T04:27:09Z","timestamp":1745382429622,"version":"3.37.3"},"reference-count":30,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2020,4,1]],"date-time":"2020-04-01T00:00:00Z","timestamp":1585699200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"}],"funder":[{"DOI":"10.13039\/501100002920","name":"Research Grants Council of Hong Kong","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002920","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neural Networks"],"published-print":{"date-parts":[[2020,4]]},"DOI":"10.1016\/j.neunet.2020.01.018","type":"journal-article","created":{"date-parts":[[2020,1,25]],"date-time":"2020-01-25T11:34:27Z","timestamp":1579952067000},"page":"319-327","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":166,"special_numbering":"C","title":["Theory of deep convolutional neural networks: Downsampling"],"prefix":"10.1016","volume":"124","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0224-9216","authenticated-orcid":false,"given":"Ding-Xuan","family":"Zhou","sequence":"first","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neunet.2020.01.018_b1","doi-asserted-by":"crossref","first-page":"930","DOI":"10.1109\/18.256500","article-title":"Universal approximation bounds for superpositions of a sigmoidal function","volume":"39","author":"Barron","year":"1993","journal-title":"IEEE Transactions on Information Theory"},{"key":"10.1016\/j.neunet.2020.01.018_b2","doi-asserted-by":"crossref","first-page":"8","DOI":"10.1137\/18M118709X","article-title":"Optimal approximation with sparsely connected deep neural 
networks","volume":"1","author":"B\u00f6lcskei","year":"2019","journal-title":"SIAM Journal on Mathematics of Data Science"},{"key":"10.1016\/j.neunet.2020.01.018_b3","doi-asserted-by":"crossref","first-page":"233","DOI":"10.1007\/BF02124745","article-title":"Limitations of the approximation capabilities of neural networks with one hidden layer","volume":"5","author":"Chui","year":"1996","journal-title":"Advances in Computational Mathematics"},{"key":"10.1016\/j.neunet.2020.01.018_b4","doi-asserted-by":"crossref","first-page":"303","DOI":"10.1007\/BF02551274","article-title":"Approximations by superpositions of sigmoidal functions","volume":"2","author":"Cybenko","year":"1989","journal-title":"Mathematics of Control, Signals, and Systems"},{"year":"1992","series-title":"Ten lectures on wavelets","author":"Daubechies","key":"10.1016\/j.neunet.2020.01.018_b5"},{"key":"10.1016\/j.neunet.2020.01.018_b6","doi-asserted-by":"crossref","first-page":"164","DOI":"10.1016\/j.acha.2014.12.005","article-title":"Consistency analysis of an empirical minimum error entropy algorithm","volume":"41","author":"Fan","year":"2016","journal-title":"Applied and Computational Harmonic Analysis"},{"year":"2016","series-title":"Deep learning","author":"Goodfellow","key":"10.1016\/j.neunet.2020.01.018_b7"},{"key":"10.1016\/j.neunet.2020.01.018_b8","doi-asserted-by":"crossref","first-page":"61","DOI":"10.1007\/s00365-001-0009-5","article-title":"On best approximation by ridge functions in the uniform norm","volume":"18","author":"Gordon","year":"2002","journal-title":"Constructive Approximation"},{"key":"10.1016\/j.neunet.2020.01.018_b9","doi-asserted-by":"crossref","first-page":"433","DOI":"10.1142\/S0219530517500026","article-title":"Thresholded spectral algorithms for sparse approximations","volume":"15","author":"Guo","year":"2017","journal-title":"Analysis and 
Applications"},{"key":"10.1016\/j.neunet.2020.01.018_b10","doi-asserted-by":"crossref","first-page":"1527","DOI":"10.1162\/neco.2006.18.7.1527","article-title":"A fast learning algorithm for deep belief nets","volume":"18","author":"Hinton","year":"2006","journal-title":"Neural Computation"},{"key":"10.1016\/j.neunet.2020.01.018_b11","doi-asserted-by":"crossref","DOI":"10.1016\/0893-6080(89)90020-8","article-title":"Multilayer feedforward networks are universal approximators","volume":"2","author":"Hornik","year":"1989","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2020.01.018_b12","doi-asserted-by":"crossref","first-page":"7649","DOI":"10.1109\/TIT.2018.2874447","article-title":"Approximation by combinations of ReLU and squared ReLU ridge functions with \u21131 and \u21130 controls","volume":"64","author":"Klusowski","year":"2018","journal-title":"IEEE Transactions on Information Theory"},{"key":"10.1016\/j.neunet.2020.01.018_b13","doi-asserted-by":"crossref","first-page":"891","DOI":"10.1080\/10485250500309608","article-title":"Adaptive regression estimation with multilayer feedforward neural networks","volume":"17","author":"Kohler","year":"2005","journal-title":"Journal of Nonparametric Statistics"},{"key":"10.1016\/j.neunet.2020.01.018_b14","series-title":"NIPS","article-title":"Imagenet classification with deep convolutional neural networks","author":"Krizhevsky","year":"2012"},{"key":"10.1016\/j.neunet.2020.01.018_b15","doi-asserted-by":"crossref","first-page":"2278","DOI":"10.1109\/5.726791","article-title":"Gradient-based learning applied to document recognition","volume":"86","author":"LeCun","year":"1998","journal-title":"Proceedings of the IEEE"},{"key":"10.1016\/j.neunet.2020.01.018_b16","doi-asserted-by":"crossref","first-page":"861","DOI":"10.1016\/S0893-6080(05)80131-5","article-title":"Multilayer feedforward networks with a non-polynomial activation function can approximate any 
function","volume":"6","author":"Leshno","year":"1993","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2020.01.018_b17","doi-asserted-by":"crossref","first-page":"249","DOI":"10.1007\/s00365-017-9379-1","article-title":"Distributed kernel gradient descent algorithms","volume":"47","author":"Lin","year":"2018","journal-title":"Constructive Approximation"},{"key":"10.1016\/j.neunet.2020.01.018_b18","doi-asserted-by":"crossref","DOI":"10.1098\/rsta.2015.0203","article-title":"Understanding deep convolutional networks","volume":"374","author":"Mallat","year":"2016","journal-title":"Philosophical Transactions of the Royal Society of London. Series A"},{"key":"10.1016\/j.neunet.2020.01.018_b19","doi-asserted-by":"crossref","first-page":"61","DOI":"10.1007\/BF02070821","article-title":"Approximation properties of a multilayered feedforward artificial neural network","volume":"1","author":"Mhaskar","year":"1993","journal-title":"Advances in Computational Mathematics"},{"key":"10.1016\/j.neunet.2020.01.018_b20","article-title":"Equivalence of approximation by convolutional neural networks and fully-connected networks","author":"Petersen","year":"2018","journal-title":"Proceedings of the American Mathematical Society"},{"key":"10.1016\/j.neunet.2020.01.018_b21","doi-asserted-by":"crossref","first-page":"296","DOI":"10.1016\/j.neunet.2018.08.019","article-title":"Optimal approximation of piecewise smooth functions using deep ReLU neural networks","volume":"108","author":"Petersen","year":"2018","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2020.01.018_b22","doi-asserted-by":"crossref","first-page":"537","DOI":"10.1016\/j.acha.2016.04.003","article-title":"Provable approximation properties for deep neural networks","volume":"44","author":"Shaham","year":"2018","journal-title":"Applied and Computational Harmonic Analysis"},{"year":"2008","series-title":"Support vector 
machines","author":"Steinwart","key":"10.1016\/j.neunet.2020.01.018_b23"},{"key":"10.1016\/j.neunet.2020.01.018_b24","series-title":"29th annual conference on learning theory, Vol. 49","first-page":"1517","article-title":"Benefits of depth in neural networks","author":"Telgarsky","year":"2016"},{"key":"10.1016\/j.neunet.2020.01.018_b25","doi-asserted-by":"crossref","first-page":"103","DOI":"10.1016\/j.neunet.2017.07.002","article-title":"Error bounds for approximations with deep ReLU networks","volume":"94","author":"Yarotsky","year":"2017","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2020.01.018_b26","doi-asserted-by":"crossref","first-page":"224","DOI":"10.1016\/j.acha.2015.08.007","article-title":"Unregularized online learning algorithms with general loss functions","volume":"42","author":"Ying","year":"2017","journal-title":"Applied and Computational Harmonic Analysis"},{"key":"10.1016\/j.neunet.2020.01.018_b27","first-page":"3299","article-title":"Divide and conquer kernel ridge regression: A distributed algorithm with minimax optimal rates","volume":"16","author":"Zhang","year":"2015","journal-title":"Journal of Machine Learning Research"},{"key":"10.1016\/j.neunet.2020.01.018_b28","doi-asserted-by":"crossref","first-page":"895","DOI":"10.1142\/S0219530518500124","article-title":"Deep distributed convolutional neural networks: universality","volume":"16","author":"Zhou","year":"2018","journal-title":"Analysis and Applications"},{"year":"2018","series-title":"Distributed approximation with deep convolutional neural networks","author":"Zhou","key":"10.1016\/j.neunet.2020.01.018_b29"},{"key":"10.1016\/j.neunet.2020.01.018_b30","doi-asserted-by":"crossref","first-page":"787","DOI":"10.1016\/j.acha.2019.06.004","article-title":"Universality of deep convolutional neural networks","volume":"48","author":"Zhou","year":"2020","journal-title":"Applied and Computational Harmonic Analysis"}],"container-title":["Neural 
Networks"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608020300204?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608020300204?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2020,3,4]],"date-time":"2020-03-04T03:21:16Z","timestamp":1583292076000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0893608020300204"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,4]]},"references-count":30,"alternative-id":["S0893608020300204"],"URL":"https:\/\/doi.org\/10.1016\/j.neunet.2020.01.018","relation":{},"ISSN":["0893-6080"],"issn-type":[{"type":"print","value":"0893-6080"}],"subject":[],"published":{"date-parts":[[2020,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Theory of deep convolutional neural networks: Downsampling","name":"articletitle","label":"Article Title"},{"value":"Neural Networks","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neunet.2020.01.018","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2020 Elsevier Ltd. All rights reserved.","name":"copyright","label":"Copyright"}]}}