{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T15:39:16Z","timestamp":1730302756549,"version":"3.28.0"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,12,1]]},"DOI":"10.1109\/vcip49819.2020.9301856","type":"proceedings-article","created":{"date-parts":[[2020,12,29]],"date-time":"2020-12-29T16:00:33Z","timestamp":1609257633000},"page":"17-20","source":"Crossref","is-referenced-by-count":1,"title":["Improving Robustness of DNNs against Common Corruptions via Gaussian Adversarial Training"],"prefix":"10.1109","author":[{"given":"Chenyu","family":"Yi","sequence":"first","affiliation":[]},{"given":"Haoliang","family":"Li","sequence":"additional","affiliation":[]},{"given":"Renjie","family":"Wan","sequence":"additional","affiliation":[]},{"given":"Alex C.","family":"Kot","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"13 255","article-title":"A fourier perspective on model robustness in computer vision","author":"yin","year":"2019","journal-title":"Advances in neural information processing systems"},{"article-title":"Towards deep learning models resistant to adversarial attacks","year":"2019","author":"madry","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1995.7.1.108"},{"article-title":"Adversarial examples are a natural consequence of test error in noise","year":"2019","author":"ford","key":"ref13"},{"article-title":"Robustness may be at odds with accuracy","year":"2018","author":"tsipras","key":"ref14"},{"article-title":"Increasing the robustness of dnns against image corruptions by playing the game of noise","year":"2020","author":"rusak","key":"ref15"},{"journal-title":"Robust Statistics Theory and Methods (with R)","year":"2019","author":"maronna","key":"ref16"},{"key":"ref17","first-page":"555","article-title":"A robust minimax approach to classification","volume":"3","author":"lanckriet","year":"2002","journal-title":"Journal of Machine Learning Research"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.1984.1056876"},{"article-title":"Augmix: A simple data processing method to improve robustness and uncertainty","year":"2019","author":"hendrycks","key":"ref19"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/QoMEX.2016.7498955"},{"article-title":"Why do deep convolutional networks generalize so poorly to small image transformations?","year":"2018","author":"azulay","key":"ref3"},{"article-title":"Benchmarking neural network robustness to common corruptions and perturbations","year":"2019","author":"hendrycks","key":"ref6"},{"key":"ref5","first-page":"7538","article-title":"Generalisation in humans and deep neural 
networks","author":"geirhos","year":"2018","journal-title":"Advances in neural information processing systems"},{"article-title":"Explaining and harnessing adversarial examples","year":"2014","author":"goodfellow","key":"ref8"},{"article-title":"Intriguing properties of neural networks","year":"2013","author":"szegedy","key":"ref7"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1989.1.4.541"},{"key":"ref1","first-page":"1097","article-title":"Imagenet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Advances in neural information processing systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140449"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.244"},{"article-title":"mixup: Beyond empirical risk minimization","year":"2017","author":"zhang","key":"ref22"},{"article-title":"Wide residual networks","year":"2016","author":"zagoruyko","key":"ref21","doi-asserted-by":"crossref","DOI":"10.5244\/C.30.87"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00020"}],"event":{"name":"2020 IEEE International Conference on Visual Communications and Image Processing (VCIP)","start":{"date-parts":[[2020,12,1]]},"location":"Macau, China","end":{"date-parts":[[2020,12,4]]}},"container-title":["2020 IEEE International Conference on Visual Communications and Image Processing (VCIP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9301747\/9301748\/09301856.pdf?arnumber=9301856","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,30]],"date-time":"2022-06-30T11:17:51Z","timestamp":1656587871000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9301856\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,12,1]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/vcip49819.2020.9301856","relation":{},"subject":[],"published":{"date-parts":[[2020,12,1]]}}}