{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:01:27Z","timestamp":1740099687255,"version":"3.37.3"},"publisher-location":"New York, NY, USA","reference-count":19,"publisher":"ACM","funder":[{"name":"Samsung Advanced Institute of Technology"},{"name":"UNIST","award":["1.170067.01"]},{"DOI":"10.13039\/501100003725","name":"National Research Foundation of Korea","doi-asserted-by":"publisher","award":["2020R1A2C2015066"],"id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]},{"name":"IC Design Education Center (IDEC)"},{"name":"Institute of Information & communications Technology Planning & Evaluation (IITP)","award":["2020-0-01336"]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2020,8,10]]},"DOI":"10.1145\/3370748.3406554","type":"proceedings-article","created":{"date-parts":[[2020,8,7]],"date-time":"2020-08-07T16:10:32Z","timestamp":1596816632000},"page":"211-216","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":3,"title":["SparTANN"],"prefix":"10.1145","author":[{"given":"Hyeonuk","family":"Sim","sequence":"first","affiliation":[{"name":"UNIST, Ulsan, South Korea and Seoul National University, Seoul, Korea"}]},{"given":"Jooyeon","family":"Choi","sequence":"additional","affiliation":[{"name":"UNIST, Ulsan, South Korea and Seoul National University, Seoul, Korea"}]},{"given":"Jongeun","family":"Lee","sequence":"additional","affiliation":[{"name":"UNIST, Ulsan, South Korea and Seoul National University, Seoul, Korea"}]}],"member":"320","published-online":{"date-parts":[[2020,8,10]]},"reference":[{"volume-title":"DianNao: A Small-footprint High-throughput Accelerator for Ubiquitous Machine-learning. In ASPLOS","year":"2014","key":"e_1_3_2_2_1_1"},{"volume-title":"Eyeriss: A Spatial Architecture for Energy-efficient Dataflow for Convolutional Neural Networks. In ISCA","year":"2016","key":"e_1_3_2_2_2_1"},{"volume-title":"TrainWare: A Memory Optimized Weight Update Architecture for On-Device Convolutional Neural Network Training. In ISLPED","year":"2018","key":"e_1_3_2_2_3_1"},{"key":"e_1_3_2_2_4_1","unstructured":"Google. 2019. Using bfloat16 with tensorflow models. https:\/\/cloud.google.com\/tpu\/docs\/bfloat16 Google. 2019. Using bfloat16 with tensorflow models. https:\/\/cloud.google.com\/tpu\/docs\/bfloat16"},{"volume-title":"ISCA","year":"2016","key":"e_1_3_2_2_5_1"},{"volume-title":"Flexpoint: An Adaptive Numerical Format for Efficient Training of Deep Neural Networks. In NIPS","year":"2017","key":"e_1_3_2_2_6_1"},{"volume-title":"DAC","year":"2019","key":"e_1_3_2_2_7_1"},{"volume-title":"TNPU: An Efficient Accelerator Architecture for Training Convolutional Neural Networks. In ASPDAC","year":"2019","key":"e_1_3_2_2_8_1"},{"volume-title":"Design Space Exploration of FPGA Accelerators for Convolutional Neural Networks. In DATE","year":"2017","key":"e_1_3_2_2_9_1"},{"volume-title":"ICML","year":"2017","author":"X","key":"e_1_3_2_2_10_1"},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"crossref","unstructured":"X. Sun etal 2020. Training Simplification and Model Simplification for Deep Learning: A Minimal Effort Back Propagation Method. IEEE Transactions on Knowledge and Data Engineering (2020). X. Sun et al. 2020. Training Simplification and Model Simplification for Deep Learning: A Minimal Effort Back Propagation Method. 
IEEE Transactions on Knowledge and Data Engineering (2020).","DOI":"10.1109\/TKDE.2018.2883613"},{"volume-title":"ISCA","year":"2017","author":"Parashar A.","key":"e_1_3_2_2_12_1"},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"crossref","unstructured":"D. Kim etal 2018. ZeNA: Zero-Aware Neural Network Accelerator. IEEE Design Test (2018). D. Kim et al. 2018. ZeNA: Zero-Aware Neural Network Accelerator. IEEE Design Test (2018).","DOI":"10.1109\/MDAT.2017.2741463"},{"key":"e_1_3_2_2_14_1","unstructured":"Forrest N. Iandola et al. 2016. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <1MB model size. CoRR abs\/1602.07360 (2016). Forrest N. Iandola et al. 2016. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <1MB model size. CoRR abs\/1602.07360 (2016)."},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3007787.3001138"},{"volume-title":"DaDianNao: A Machine-Learning Supercomputer. In MICRO","year":"2014","author":"Chen Y.","key":"e_1_3_2_2_16_1"},{"volume-title":"Caffe: Convolutional Architecture for Fast Feature Embedding. CoRR abs\/1408.5093","year":"2014","key":"e_1_3_2_2_17_1"},{"volume-title":"HP Laboratories","year":"2008","key":"e_1_3_2_2_18_1"},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952679"}],"event":{"name":"ISLPED '20: ACM\/IEEE International Symposium on Low Power Electronics and Design","sponsor":["SIGDA ACM Special Interest Group on Design Automation","IEEE CAS"],"location":"Boston Massachusetts","acronym":"ISLPED '20"},"container-title":["Proceedings of the ACM\/IEEE International Symposium on Low Power Electronics and Design"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3370748.3406554","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,12]],"date-time":"2023-01-12T02:55:01Z","timestamp":1673492101000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3370748.3406554"}},"subtitle":["sparse training accelerator for neural networks with threshold-based sparsification"],"short-title":[],"issued":{"date-parts":[[2020,8,10]]},"references-count":19,"alternative-id":["10.1145\/3370748.3406554","10.1145\/3370748"],"URL":"https:\/\/doi.org\/10.1145\/3370748.3406554","relation":{},"subject":[],"published":{"date-parts":[[2020,8,10]]},"assertion":[{"value":"2020-08-10","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
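The record above is a Crossref REST API work record for DOI 10.1145/3370748.3406554 (SparTANN, ISLPED '20). As a minimal sketch, and not part of the record itself, the Python below shows how such a record can be fetched from the public Crossref endpoint https://api.crossref.org/works/{DOI} and a few of the fields shown above read back; it assumes the third-party `requests` package is installed.

```python
# Minimal sketch (illustration only): fetch this Crossref work record and
# read a few fields. Assumes the `requests` package; field names match the
# JSON record shown above ("message", "title", "subtitle", "author", ...).
import requests

DOI = "10.1145/3370748.3406554"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=10)
resp.raise_for_status()
work = resp.json()["message"]          # the "message" object of the record

title = work["title"][0]               # "SparTANN"
subtitle = work.get("subtitle", [""])[0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])

print(f"{title}: {subtitle}")
print(f"Authors: {authors}")
print(f"Event: {work['event']['name']}, pages {work['page']}")
```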