{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,8]],"date-time":"2024-09-08T04:44:23Z","timestamp":1725770663167},"reference-count":19,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,11,29]],"date-time":"2021-11-29T00:00:00Z","timestamp":1638144000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,11,29]],"date-time":"2021-11-29T00:00:00Z","timestamp":1638144000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,11,29]],"date-time":"2021-11-29T00:00:00Z","timestamp":1638144000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,11,29]]},"DOI":"10.1109\/milcom52596.2021.9652947","type":"proceedings-article","created":{"date-parts":[[2021,12,30]],"date-time":"2021-12-30T21:06:10Z","timestamp":1640898370000},"page":"886-891","source":"Crossref","is-referenced-by-count":1,"title":["HoneyModels: Machine Learning Honeypots"],"prefix":"10.1109","author":[{"given":"Ahmed","family":"Abdou","sequence":"first","affiliation":[]},{"given":"Ryan","family":"Sheatsley","sequence":"additional","affiliation":[]},{"given":"Yohan","family":"Beugin","sequence":"additional","affiliation":[]},{"given":"Tyler","family":"Shipp","sequence":"additional","affiliation":[]},{"given":"Patrick","family":"McDaniel","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Transferability in machine learning: from phenomena to black-box attacks using adversarial samples","author":"papernot","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3052973.3053009"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.41"},{"key":"ref14","article-title":"Certified defenses against adversarial examples","author":"raghunathan","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978392"},{"key":"ref16","article-title":"Intriguing properties of neural networks","author":"szegedy","year":"2013","journal-title":"ArXiv Preprint"},{"key":"ref17","article-title":"Evaluating robustness of neural networks with mixed integer programming","author":"tjeng","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref18","article-title":"Mixtrain: Scalable training of formally robust neural networks","author":"wang","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2010.936730"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/S0378-7206(01)00091-X"},{"key":"ref6","first-page":"485","article-title":"Deepsigns: an end-to-end watermarking framework for ownership protection of deep neural networks","author":"rouhani","year":"0","journal-title":"Proceedings of the fourth international conference on Architectural support for programming languages and operating systems - AS"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.29007\/3b2l"},{"journal-title":"Adversarial examples in the physical world","year":"2017","author":"kurakin","key":"ref8"},{"key":"ref7","article-title":"Badnets: Identifying vulnerabilities in the machine learning model supply chain","author":"gu","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref2","article-title":"End to end learning for self-driving cars","author":"bojarski","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref1","first-page":"1615","article-title":"Turning your weakness into a strength: Watermarking deep neural networks by backdooring","author":"adi","year":"0","journal-title":"27th USENIX Security Symposium ( USENIX Security 18)"},{"key":"ref9","article-title":"Towards deep learning models resistant to adversarial attacks","author":"madry","year":"2017","journal-title":"ArXiv Preprint"}],"event":{"name":"MILCOM 2021 - 2021 IEEE Military Communications Conference (MILCOM)","start":{"date-parts":[[2021,11,29]]},"location":"San Diego, CA, USA","end":{"date-parts":[[2021,12,2]]}},"container-title":["MILCOM 2021 - 2021 IEEE Military Communications Conference (MILCOM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9652874\/9652880\/09652947.pdf?arnumber=9652947","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T17:00:02Z","timestamp":1652202002000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9652947\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,11,29]]},"references-count":19,"URL":"https:\/\/doi.org\/10.1109\/milcom52596.2021.9652947","relation":{},"subject":[],"published":{"date-parts":[[2021,11,29]]}}}