{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,7,7]],"date-time":"2024-07-07T02:05:40Z","timestamp":1720317940433},"reference-count":53,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Expert Systems with Applications"],"published-print":{"date-parts":[[2024,7]]},"DOI":"10.1016\/j.eswa.2023.123085","type":"journal-article","created":{"date-parts":[[2023,12,29]],"date-time":"2023-12-29T17:11:37Z","timestamp":1703869897000},"page":"123085","update-policy":"http:\/\/dx.doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Enhancing adversarial robustness with randomized interlayer processing"],"prefix":"10.1016","volume":"245","author":[{"ORCID":"http:\/\/orcid.org\/0000-0002-9494-8809","authenticated-orcid":false,"given":"Ameer","family":"Mohammed","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-1997-5209","authenticated-orcid":false,"given":"Ziad","family":"Ali","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-0673-7324","authenticated-orcid":false,"given":"Imtiaz","family":"Ahmad","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.eswa.2023.123085_b1","series-title":"Advances in neural information processing systems 32","first-page":"12192","article-title":"Are labels required for improving adversarial robustness?","author":"Alayrac","year":"2019"},{"key":"10.1016\/j.eswa.2023.123085_b2","series-title":"Computer vision - ECCV 2020 - 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, part XXIII","first-page":"484","article-title":"Square attack: A query-efficient black-box adversarial attack via random search","volume":"vol. 12368","author":"Andriushchenko","year":"2020"},{"key":"10.1016\/j.eswa.2023.123085_b3","series-title":"Proceedings of the 35th international conference on machine learning, ICML 2018, Stockholmsm\u00e4ssan, Stockholm, Sweden, July 10-15, 2018","first-page":"274","article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","volume":"vol. 80","author":"Athalye","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b4","series-title":"Proceedings of the 35th international conference on machine learning","first-page":"284","article-title":"Synthesizing robust adversarial examples","volume":"vol. 80","author":"Athalye","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b5","series-title":"Advances in neural information processing systems 31: Annual conference on neural information processing systems 2018, NeurIPS 2018, December 3-8, 2018, Montr\u00e9al, Canada","first-page":"10096","article-title":"Thwarting adversarial examples: An L_0-robust sparse Fourier transform","author":"Bafna","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b6","series-title":"ICLR 2022 workshop on new frontiers in adversarial machine learning","article-title":"Early layers are more important for adversarial robustness","author":"Bakiskan","year":"2022"},{"key":"10.1016\/j.eswa.2023.123085_b7","series-title":"Machine learning and knowledge discovery in databases","first-page":"387","article-title":"Evasion attacks against machine learning at test time","author":"Biggio","year":"2013"},{"key":"10.1016\/j.eswa.2023.123085_b8","doi-asserted-by":"crossref","unstructured":"Byun, J., Go, H., & Kim, C. (2022). On the Effectiveness of Small Input Noise for Defending Against Query-based Black-Box Attacks. In Proceedings of the IEEE\/CVF winter conference on applications of computer vision (pp. 3051\u20133060).","DOI":"10.1109\/WACV51458.2022.00387"},{"key":"10.1016\/j.eswa.2023.123085_b9","series-title":"On evaluating adversarial robustness","author":"Carlini","year":"2019"},{"key":"10.1016\/j.eswa.2023.123085_b10","series-title":"Unlabeled data improves adversarial robustness","author":"Carmon","year":"2019"},{"key":"10.1016\/j.eswa.2023.123085_b11","doi-asserted-by":"crossref","DOI":"10.1016\/j.mlwa.2021.100134","article-title":"Deep learning in computer vision: A critical review of emerging techniques and application scenarios","volume":"6","author":"Chai","year":"2021","journal-title":"Machine Learning with Applications"},{"key":"10.1016\/j.eswa.2023.123085_b12","unstructured":"Croce, F., Andriushchenko, M., Sehwag, V., Debenedetti, E., Flammarion, N., Chiang, M., Mittal, P., & Hein, M. (2021). RobustBench: a standardized adversarial robustness benchmark. In J. Vanschoren, & S. Yeung (Eds.), Proceedings of the neural information processing systems track on datasets and benchmarks 1, NeurIPS datasets and benchmarks 2021, December 2021, Virtual."},{"key":"10.1016\/j.eswa.2023.123085_b13","series-title":"Proceedings of the 37th international conference on machine learning, ICML 2020, 13-18 July 2020, Virtual event","first-page":"2196","article-title":"Minimally distorted adversarial examples with a fast adaptive boundary attack","volume":"vol. 119","author":"Croce","year":"2020"},{"key":"10.1016\/j.eswa.2023.123085_b14","series-title":"International conference on machine learning","first-page":"2206","article-title":"Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks","author":"Croce","year":"2020"},{"key":"10.1016\/j.eswa.2023.123085_b15","series-title":"On the interplay of adversarial robustness and architecture components: patches, convolution and attention","author":"Croce","year":"2022"},{"key":"10.1016\/j.eswa.2023.123085_b16","series-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b17","series-title":"2023 IEEE\/CVF conference on computer vision and pattern recognition (CVPR)","first-page":"24678","article-title":"The enemy of my enemy is my friend: Exploring inverse adversaries for improving adversarial training","author":"Dong","year":"2023"},{"key":"10.1016\/j.eswa.2023.123085_b18","series-title":"On the connection between adversarial robustness and saliency map interpretability","author":"Etmann","year":"2019"},{"issue":"7","key":"10.1016\/j.eswa.2023.123085_b19","doi-asserted-by":"crossref","DOI":"10.1145\/3464423","article-title":"Deep learning for medical anomaly detection \u2013 A survey","volume":"54","author":"Fernando","year":"2021","journal-title":"ACM Computing Surveys"},{"key":"10.1016\/j.eswa.2023.123085_b20","series-title":"Advances in neural information processing systems","article-title":"On the limitations of stochastic pre-processing defenses","author":"Gao","year":"2022"},{"key":"10.1016\/j.eswa.2023.123085_b21","series-title":"3rd International conference on learning representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference track proceedings","article-title":"Explaining and harnessing adversarial examples","author":"Goodfellow","year":"2015"},{"key":"10.1016\/j.eswa.2023.123085_b22","series-title":"2020 IEEE\/CVF conference on computer vision and pattern recognition (CVPR)","first-page":"628","article-title":"When NAS meets robustness: In search of robust architectures against adversarial attacks","author":"Guo","year":"2020"},{"key":"10.1016\/j.eswa.2023.123085_b23","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770\u2013778).","DOI":"10.1109\/CVPR.2016.90"},{"key":"10.1016\/j.eswa.2023.123085_b24","series-title":"Advances in neural information processing systems 33: Annual conference on neural information processing systems 2020, NeurIPS 2020, December 6-12, 2020, Virtual","article-title":"Denoising diffusion probabilistic models","author":"Ho","year":"2020"},{"key":"10.1016\/j.eswa.2023.123085_b25","series-title":"NeurIPS","article-title":"Exploring architectural ingredients of adversarially robust deep neural networks","author":"Huang","year":"2021"},{"key":"10.1016\/j.eswa.2023.123085_b26","doi-asserted-by":"crossref","unstructured":"Jia, X., Zhang, Y., Wu, B., Ma, K., Wang, J., & Cao, X. (2022). LAS-AT: adversarial training with learnable attack strategy. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 13398\u201313408).","DOI":"10.1109\/CVPR52688.2022.01304"},{"key":"10.1016\/j.eswa.2023.123085_b27","series-title":"Adversarial logit pairing","author":"Kannan","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b28","series-title":"NeurIPS","article-title":"Elucidating the design space of diffusion-based generative models","author":"Karras","year":"2022"},{"key":"10.1016\/j.eswa.2023.123085_b29","series-title":"Adversarial examples in the physical world","author":"Kurakin","year":"2016"},{"key":"10.1016\/j.eswa.2023.123085_b30","series-title":"6th International conference on learning representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference track proceedings","article-title":"Towards deep learning models resistant to adversarial attacks","author":"Madry","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b31","series-title":"NIPS workshop on deep learning and unsupervised feature learning","article-title":"Reading digits in natural images with unsupervised feature learning","author":"Netzer","year":"2011"},{"key":"10.1016\/j.eswa.2023.123085_b32","series-title":"Advances in neural information processing systems","article-title":"Random noise defense against query-based black-box attacks","author":"Qin","year":"2021"},{"key":"10.1016\/j.eswa.2023.123085_b33","series-title":"Fixing data augmentation to improve adversarial robustness","author":"Rebuffi","year":"2021"},{"key":"10.1016\/j.eswa.2023.123085_b34","series-title":"NeurIPS","first-page":"5019","article-title":"Adversarially robust generalization requires more data","author":"Schmidt","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b35","series-title":"9th International conference on learning representations, ICLR 2021, Virtual event, Austria, May 3-7, 2021","article-title":"Score-based generative modeling through stochastic differential equations","author":"Song","year":"2021"},{"key":"10.1016\/j.eswa.2023.123085_b36","series-title":"Computer vision \u2013 ECCV 2018","first-page":"644","article-title":"Is robustness the cost of accuracy? \u2013 A comprehensive study on the robustness of 18 deep image classification models","author":"Su","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b37","series-title":"2nd International conference on learning representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference track proceedings","article-title":"Intriguing properties of neural networks","author":"Szegedy","year":"2014"},{"key":"10.1016\/j.eswa.2023.123085_b38","series-title":"RobustART: Benchmarking robustness on architecture design and training techniques","author":"Tang","year":"2021"},{"key":"10.1016\/j.eswa.2023.123085_b39","doi-asserted-by":"crossref","DOI":"10.1016\/j.cviu.2020.103154","article-title":"Task dependent deep LDA pruning of neural networks","volume":"203","author":"Tian","year":"2021","journal-title":"Computer Vision and Image Understanding"},{"issue":"22","key":"10.1016\/j.eswa.2023.123085_b40","doi-asserted-by":"crossref","first-page":"22399","DOI":"10.1109\/JIOT.2021.3111024","article-title":"Adversarial attacks and defenses for deep-learning-based unmanned aerial vehicles","volume":"9","author":"Tian","year":"2022","journal-title":"IEEE Internet of Things Journal"},{"issue":"12","key":"10.1016\/j.eswa.2023.123085_b41","doi-asserted-by":"crossref","first-page":"13699","DOI":"10.1109\/TCYB.2021.3125345","article-title":"Joint adversarial example and false data injection attacks for state estimation in power systems","volume":"52","author":"Tian","year":"2022","journal-title":"IEEE Transactions on Cybernetics"},{"key":"10.1016\/j.eswa.2023.123085_b42","series-title":"Advances in neural information processing systems 33: Annual conference on neural information processing systems 2020, NeurIPS 2020, December 6-12, 2020, Virtual","article-title":"On adaptive attacks to adversarial example defenses","author":"Tram\u00e8r","year":"2020"},{"key":"10.1016\/j.eswa.2023.123085_b43","series-title":"Robustness may be at odds with accuracy","author":"Tsipras","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b44","series-title":"International conference on machine learning","article-title":"On the convergence and robustness of adversarial training","author":"Wang","year":"2019"},{"key":"10.1016\/j.eswa.2023.123085_b45","series-title":"Better diffusion models further improve adversarial training","author":"Wang","year":"2023"},{"key":"10.1016\/j.eswa.2023.123085_b46","series-title":"International conference on learning representations","article-title":"Improving adversarial robustness requires revisiting misclassified examples","author":"Wang","year":"2019"},{"key":"10.1016\/j.eswa.2023.123085_b47","first-page":"2958","article-title":"Adversarial weight perturbation helps robust generalization","volume":"33","author":"Wu","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"10.1016\/j.eswa.2023.123085_b48","series-title":"Enhancing adversarial defense by k-winners-take-all","author":"Xiao","year":"2019"},{"key":"10.1016\/j.eswa.2023.123085_b49","series-title":"IEEE conference on computer vision and pattern recognition, CVPR 2019, Long Beach, CA, USA, June 16-20, 2019","first-page":"501","article-title":"Feature denoising for improving adversarial robustness","author":"Xie","year":"2019"},{"key":"10.1016\/j.eswa.2023.123085_b50","series-title":"25th Annual network and distributed system security symposium, NDSS 2018, San Diego, California, USA, February 18-21, 2018","article-title":"Feature squeezing: Detecting adversarial examples in deep neural networks","author":"Xu","year":"2018"},{"key":"10.1016\/j.eswa.2023.123085_b51","series-title":"Understanding adversarial robustness from feature maps of convolutional layers","author":"Xu","year":"2022"},{"key":"10.1016\/j.eswa.2023.123085_b52","unstructured":"Zhang, J., Xu, X., Han, B., Niu, G., Cui, L., Sugiyama, M., & Kankanhalli, M. (2020). Attacks Which Do Not Kill Training Make Adversarial Learning Stronger. In Proceedings of the 37th international conference on machine learning, ICML \u201920. JMLR.org."},{"key":"10.1016\/j.eswa.2023.123085_b53","series-title":"International conference on machine learning","first-page":"7472","article-title":"Theoretically principled trade-off between robustness and accuracy","author":"Zhang","year":"2019"}],"container-title":["Expert Systems with Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S095741742303587X?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S095741742303587X?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2024,4,7]],"date-time":"2024-04-07T08:34:24Z","timestamp":1712478864000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S095741742303587X"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7]]},"references-count":53,"alternative-id":["S095741742303587X"],"URL":"https:\/\/doi.org\/10.1016\/j.eswa.2023.123085","relation":{},"ISSN":["0957-4174"],"issn-type":[{"value":"0957-4174","type":"print"}],"subject":[],"published":{"date-parts":[[2024,7]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Enhancing adversarial robustness with randomized interlayer processing","name":"articletitle","label":"Article Title"},{"value":"Expert Systems with Applications","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.eswa.2023.123085","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2023 Elsevier Ltd. All rights reserved.","name":"copyright","label":"Copyright"}],"article-number":"123085"}}