{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,12,26]],"date-time":"2024-12-26T07:40:07Z","timestamp":1735198807553,"version":"3.32.0"},"reference-count":53,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,10,14]],"date-time":"2024-10-14T00:00:00Z","timestamp":1728864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,10,14]],"date-time":"2024-10-14T00:00:00Z","timestamp":1728864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,10,14]]},"DOI":"10.1109\/iros58592.2024.10802066","type":"proceedings-article","created":{"date-parts":[[2024,12,25]],"date-time":"2024-12-25T19:17:39Z","timestamp":1735154259000},"page":"595-602","source":"Crossref","is-referenced-by-count":0,"title":["Mitigating Adversarial Perturbations for Deep Reinforcement Learning via Vector Quantization"],"prefix":"10.1109","author":[{"given":"Tung M.","family":"Luu","sequence":"first","affiliation":[{"name":"Korea Advanced Institute of Science and Technology,School of Electrical Engineering,Daejeon,Republic of Korea,34141"}]},{"given":"Thanh","family":"Nguyen","sequence":"additional","affiliation":[{"name":"Korea Advanced Institute of Science and Technology,School of Electrical Engineering,Daejeon,Republic of Korea,34141"}]},{"given":"Tee Joshua","family":"Tian Jin","sequence":"additional","affiliation":[{"name":"Korea Advanced Institute of Science and Technology,School of Electrical Engineering,Daejeon,Republic of Korea,34141"}]},{"given":"Sungwoon","family":"Kim","sequence":"additional","affiliation":[{"name":"Korea University,Department of Artificial Intelligence,Seoul,Republic of Korea,02841"}]},{"given":"Chang D.","family":"Yoo","sequence":"additional","affiliation":[{"name":"Korea Advanced Institute of Science and Technology,School of Electrical Engineering,Daejeon,Republic of Korea,34141"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-62416-7_19"},{"article-title":"Whatever does not kill deep reinforcement learning, makes it stronger","year":"2017","author":"Behzadan","key":"ref2"},{"article-title":"Estimating or propagating gradients through stochastic neurons for conditional computation","year":"2013","author":"Bengio","key":"ref3"},{"key":"ref4","article-title":"Convergence properties of the k-means algorithms","author":"Bottou","year":"1994","journal-title":"NeurIPS"},{"key":"ref5","article-title":"Quantifying generalization in reinforcement learning","author":"Cobbe","year":"2019","journal-title":"ICML"},{"article-title":"A study of the effect of jpg compression on adversarial images","year":"2016","author":"Dziugaite","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/3-540-45327-X_24"},{"article-title":"D4rl: Datasets for deep data-driven reinforcement learning","year":"2020","author":"Fu","key":"ref8"},{"key":"ref9","article-title":"A minimalist approach to offline reinforcement learning","author":"Fujimoto","year":"2021","journal-title":"NeurIPS"},{"key":"ref10","article-title":"Addressing function approximation error in actor-critic methods","author":"Fujimoto","year":"2018","journal-title":"ICML"},{"article-title":"Explaining and harnessing adversarial 
examples","year":"2014","author":"Goodfellow","key":"ref11"},{"key":"ref12","article-title":"Countering adversarial images using input transformations","author":"Guo","year":"2018","journal-title":"ICLR"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00681"},{"key":"ref14","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"Haarnoja","year":"2018","journal-title":"ICML"},{"article-title":"Adversarial attacks on neural network policies","year":"2017","author":"Huang","key":"ref15"},{"article-title":"Discrete factorial representations as an abstraction for goal conditioned reinforcement learning","year":"2022","author":"Islam","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683044"},{"article-title":"Delving into adversarial attacks on deep policies","year":"2017","author":"Kos","key":"ref18"},{"article-title":"Image augmentation is all you need: Regularizing deep reinforcement learning from pixels","year":"2020","author":"Kostrikov","key":"ref19"},{"article-title":"Adversarial machine learning at scale","year":"2016","author":"Kurakin","key":"ref20"},{"key":"ref21","article-title":"Reinforcement learning with augmented data","author":"Laskin","year":"2020","journal-title":"NeurIPS"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.3182\/20020721-6-ES-1901.01068"},{"key":"ref23","article-title":"Network randomization: A simple technique for generalization in deep reinforcement learning","author":"Lee","year":"2020","journal-title":"ICLR"},{"article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","year":"2020","author":"Levine","key":"ref24"},{"key":"ref25","article-title":"Efficient adversarial training without attacking: Worst-case-aware robust reinforcement learning","author":"Liang","year":"2022","journal-title":"NeurIPS"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00191"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/525"},{"key":"ref28","article-title":"Discrete-valued neural communication","author":"Liu","year":"2021","journal-title":"NeurIPS"},{"article-title":"No need to worry about adversarial examples in object detection in autonomous vehicles","year":"2017","author":"Lu","key":"ref29"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3182107"},{"article-title":"Towards deep learning models resistant to adversarial attacks","year":"2017","author":"Madry","key":"ref31"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.23919\/ACC50511.2021.9483052"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref34","article-title":"Diffusion models for adversarial purification","author":"Nie","year":"2022","journal-title":"ICML"},{"key":"ref35","article-title":"Robust deep reinforcement learning through adversarial loss","author":"Oikarinen","year":"2021","journal-title":"NeurIPS"},{"key":"ref36","article-title":"Vector quantized models for planning","author":"Ozair","year":"2021","journal-title":"ICML"},{"key":"ref37","article-title":"Robust deep reinforcement learning with adversarial attacks","author":"Pattanaik","year":"2018","journal-title":"AAMAS"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00894"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.23919\/ACC50511.2021.9483025"},{"key":"ref40","article-title":"Denoised smoothing: A provable defense for 
pretrained classifiers","author":"Salman","year":"2020","journal-title":"NeurIPS"},{"key":"ref41","article-title":"Defensegan: Protecting classifiers against adversarial attacks using generative models","author":"Samangouei","year":"2018","journal-title":"ICLR"},{"key":"ref42","article-title":"Deep reinforcement learning with robust and smooth policy","author":"Shen","year":"2020","journal-title":"ICML"},{"key":"ref43","article-title":"Who is the strongest enemy? towards optimal and efficient evasion attacks in deep rl","author":"Sun","year":"2022","journal-title":"ICLR"},{"article-title":"Intriguing properties of neural networks","year":"2013","author":"Szegedy","key":"ref44"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202133"},{"key":"ref46","article-title":"Neural discrete representation learning","author":"Van Den Oord","year":"2017","journal-title":"NeurIPS"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2018.23198"},{"key":"ref49","article-title":"Rorl: Robust offline reinforcement learning via conservative smoothing","author":"Yang","year":"2022","journal-title":"NeurIPS"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2019.01.003"},{"key":"ref51","article-title":"Theoretically principled trade-off between robustness and accuracy","author":"Zhang","year":"2019","journal-title":"ICML"},{"key":"ref52","article-title":"Robust reinforcement learning on state observations with learned optimal adversary","author":"Zhang","year":"2021","journal-title":"ICLR"},{"key":"ref53","article-title":"Robust deep reinforcement learning against adversarial perturbations on state observations","author":"Zhang","year":"2020","journal-title":"NeurIPS"}],"event":{"name":"2024 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2024,10,14]]},"location":"Abu Dhabi, United Arab Emirates","end":{"date-parts":[[2024,10,18]]}},"container-title":["2024 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10801246\/10801290\/10802066.pdf?arnumber=10802066","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,26]],"date-time":"2024-12-26T06:59:19Z","timestamp":1735196359000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10802066\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,14]]},"references-count":53,"URL":"https:\/\/doi.org\/10.1109\/iros58592.2024.10802066","relation":{},"subject":[],"published":{"date-parts":[[2024,10,14]]}}}
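A work record with this envelope ({"status", "message-type", "message": {...}}) can typically be retrieved from the public Crossref REST API at api.crossref.org/works/{doi}. The sketch below is a minimal example, assuming network access and the third-party `requests` package; it only reads fields that appear in the record above (title, author, container-title, issued, page).

import requests

# DOI taken from the record above.
DOI = "10.1109/iros58592.2024.10802066"

# Fetch the Crossref work record; the response body is a JSON envelope like the
# one shown above, with the bibliographic data under the "message" key.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# Extract a few of the fields present in this record.
title = work["title"][0]
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
)
venue = work.get("container-title", [""])[0]
year = work["issued"]["date-parts"][0][0]
pages = work.get("page", "")

# Assemble a simple one-line citation string from the extracted fields.
print(f'{authors}. "{title}". {venue}, {year}, pp. {pages}.')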