{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T05:19:29Z","timestamp":1733203169284,"version":"3.30.0"},"publisher-location":"Cham","reference-count":48,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030585228"},{"type":"electronic","value":"9783030585235"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-58523-5_29","type":"book-chapter","created":{"date-parts":[[2020,12,3]],"date-time":"2020-12-03T20:13:16Z","timestamp":1607026396000},"page":"496-512","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":70,"title":["RadarNet: Exploiting Radar for Robust Perception of Dynamic Objects"],"prefix":"10.1007","author":[{"given":"Bin","family":"Yang","sequence":"first","affiliation":[]},{"given":"Runsheng","family":"Guo","sequence":"additional","affiliation":[]},{"given":"Ming","family":"Liang","sequence":"additional","affiliation":[]},{"given":"Sergio","family":"Casas","sequence":"additional","affiliation":[]},{"given":"Raquel","family":"Urtasun","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,12,4]]},"reference":[{"unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)","key":"29_CR1"},{"key":"29_CR2","doi-asserted-by":"publisher","first-page":"780","DOI":"10.1109\/9.1299","volume":"33","author":"HA Blom","year":"1988","unstructured":"Blom, H.A., Bar-Shalom, Y.: The interacting multiple model algorithm for systems with Markovian switching coefficients. IEEE Trans. Autom. Control 33, 780\u2013783 (1988)","journal-title":"IEEE Trans. Autom. Control"},{"doi-asserted-by":"crossref","unstructured":"Caesar, H., et al.: nuScenes: ldataset for autonomous driving. In: CVPR (2020)","key":"29_CR3","DOI":"10.1109\/CVPR42600.2020.01164"},{"doi-asserted-by":"crossref","unstructured":"Chadwick, S., Maddetn, W., Newman, P.: Distant vehicle detection using radar and vision. In: ICRA (2019)","key":"29_CR4","DOI":"10.1109\/ICRA.2019.8794312"},{"doi-asserted-by":"crossref","unstructured":"Chen, X., Kundu, K., Zhang, Z., Ma, H., Fidler, S., Urtasun, R.: Monocular 3D object detection for autonomous driving. 
In: CVPR (2016)","key":"29_CR5","DOI":"10.1109\/CVPR.2016.236"},{"doi-asserted-by":"crossref","unstructured":"Chen, X., Ma, H., Wan, J., Li, B., Xia, T.: Multi-view 3D object detection network for autonomous driving. In: CVPR (2017)","key":"29_CR6","DOI":"10.1109\/CVPR.2017.691"},{"doi-asserted-by":"crossref","unstructured":"Cho, H., Seo, Y.W., Kumar, B.V., Rajkumar, R.R.: A multi-sensor fusion system for moving object detection and tracking in urban driving environments. In: 2014 IEEE International Conference on Robotics and Automation (ICRA), pp. 1836\u20131843. IEEE (2014)","key":"29_CR7","DOI":"10.1109\/ICRA.2014.6907100"},{"doi-asserted-by":"crossref","unstructured":"Danzer, A., Griebel, T., Bach, M., Dietmayer, K.: 2D car detection in radar data with pointNets. In: ITSC (2019)","key":"29_CR8","DOI":"10.1109\/ITSC.2019.8917000"},{"doi-asserted-by":"crossref","unstructured":"G\u00f6hring, D., Wang, M., Schn\u00fcrmacher, M., Ganjineh, T.: Radar\/lidar sensor fusion for car-following on highways. In: ICRA (2011)","key":"29_CR9","DOI":"10.1109\/ICARA.2011.6144918"},{"unstructured":"Hajri, H., Rahal, M.C.: Real time lidar and radar high-level fusion for obstacle detection and tracking with evaluation on a ground truth. Int. J. Mech. Mechatron. Eng. (2018)","key":"29_CR10"},{"unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift. In: ICML (2015)","key":"29_CR11"},{"doi-asserted-by":"crossref","unstructured":"Kellner, D., Klappstein, J., Dietmayer, K.: Grid-based DBSCAN for clustering extended objects in radar data. In: IEEE Intelligent Vehicles Symposium (2012)","key":"29_CR12","DOI":"10.1109\/IVS.2012.6232167"},{"doi-asserted-by":"crossref","unstructured":"Kim, S., Lee, S., Doo, S., Shim, B.: Moving target classification in automotive radar systems using convolutional recurrent neural networks. In: 26th European Signal Processing Conference (EUSIPCO) (2018)","key":"29_CR13","DOI":"10.23919\/EUSIPCO.2018.8553185"},{"unstructured":"Kingma, D., Ba, J.: Adam: a method for stochastic optimization. In: ICLR (2015)","key":"29_CR14"},{"doi-asserted-by":"crossref","unstructured":"Ku, J., Mozifian, M., Lee, J., Harakeh, A., Waslander, S.: Joint 3D proposal generation and object detection from view aggregation. In: IROS (2018)","key":"29_CR15","DOI":"10.1109\/IROS.2018.8594049"},{"doi-asserted-by":"crossref","unstructured":"Kuang, H., Liu, X., Zhang, J., Fang, Z.: Multi-modality cascaded fusion technology for autonomous driving. In: 4th International Conference on Robotics and Automation Sciences (ICRAS) (2020)","key":"29_CR16","DOI":"10.1109\/ICRAS49812.2020.9135065"},{"doi-asserted-by":"crossref","unstructured":"Lang, A.H., Vora, S., Caesar, H., Zhou, L., Yang, J., Beijbom, O.: PointPillars: fast encoders for object detection from point clouds. In: CVPR (2019)","key":"29_CR17","DOI":"10.1109\/CVPR.2019.01298"},{"doi-asserted-by":"crossref","unstructured":"Li, B.: 3D fully convolutional network for vehicle detection in point cloud. In: IROS (2017)","key":"29_CR18","DOI":"10.1109\/IROS.2017.8205955"},{"unstructured":"Li, B., Zhang, T., Xia, T.: Vehicle detection from 3D lidar using fully convolutional network. 
In: RSS (2016)","key":"29_CR19"},{"key":"29_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"663","DOI":"10.1007\/978-3-030-01270-0_39","volume-title":"Computer Vision \u2013 ECCV 2018","author":"M Liang","year":"2018","unstructured":"Liang, M., Yang, B., Wang, S., Urtasun, R.: Deep continuous fusion for multi-sensor 3D object detection. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11220, pp. 663\u2013678. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01270-0_39"},{"unstructured":"Liang, M., et al.: Object trajectory evolution for end-to-end perception and prediction. In: CVPR (2020)","key":"29_CR21"},{"unstructured":"Lim, T.Y., et al.: Radar and camera early fusion for vehicle detection in advanced driver assistance systems. In: Machine Learning for Autonomous Driving Workshop at the 33rd Conference on Neural Information Processing Systems (2019)","key":"29_CR22"},{"doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Doll\u00e1r, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR (2017)","key":"29_CR23","DOI":"10.1109\/CVPR.2017.106"},{"doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Goyal, P., Girshick, R., He, K., Doll\u00e1r, P.: Focal loss for dense object detection. In: ICCV (2017)","key":"29_CR24","DOI":"10.1109\/ICCV.2017.324"},{"doi-asserted-by":"crossref","unstructured":"Lombacher, J., Laudt, K., Hahn, M., Dickmann, J., W\u00f6hler, C.: Semantic radar grids. In: IEEE Intelligent Vehicles Symposium (IV) (2017)","key":"29_CR25","DOI":"10.1109\/IVS.2017.7995871"},{"doi-asserted-by":"crossref","unstructured":"Luo, W., Yang, B., Urtasun, R.: Fast and furious: real time end-to-end 3D detection, tracking and motion forecasting with a single convolutional net. In: CVPR (2018)","key":"29_CR26","DOI":"10.1109\/CVPR.2018.00376"},{"doi-asserted-by":"crossref","unstructured":"Meyer, G.P., Laddha, A., Kee, E., Vallespi-Gonzalez, C., Wellington, C.K.: LaserNet: an efficient probabilistic 3D object detector for autonomous driving. In: CVPR (2019)","key":"29_CR27","DOI":"10.1109\/CVPR.2019.01296"},{"unstructured":"Meyer, M., Kuschk, G.: Deep learning based 3D object detection for automotive radar and camera. In: 16th European Radar Conference (EuRAD) (2019)","key":"29_CR28"},{"doi-asserted-by":"crossref","unstructured":"Nabati, R., Qi, H.: RRPN: radar region proposal network for object detection in autonomous vehicles. In: ICIP (2019)","key":"29_CR29","DOI":"10.1109\/ICIP.2019.8803392"},{"doi-asserted-by":"crossref","unstructured":"Nobis, F., Geisslinger, M., Weber, M., Betz, J., Lienkamp, M.: A deep learning-based radar and camera sensor fusion architecture for object detection. In: Sensor Data Fusion: Trends, Solutions, Applications (SDF) (2019)","key":"29_CR30","DOI":"10.1109\/SDF.2019.8916629"},{"doi-asserted-by":"crossref","unstructured":"Patel, K., Rambach, K., Visentin, T., Rusev, D., Pfeiffer, M., Yang, B.: Deep learning-based object classification on automotive radar spectra. In: RadarConf (2019)","key":"29_CR31","DOI":"10.1109\/RADAR.2019.8835775"},{"doi-asserted-by":"crossref","unstructured":"Qi, C.R., Liu, W., Wu, C., Su, H., Guibas, L.J.: Frustum PointNets for 3D object detection from RGB-D data. In: CVPR (2018)","key":"29_CR32","DOI":"10.1109\/CVPR.2018.00102"},{"doi-asserted-by":"crossref","unstructured":"Schumann, O., Hahn, M., Dickmann, J., W\u00f6hler, C.: Semantic segmentation on radar point clouds. 
In: FUSION (2018)","key":"29_CR33","DOI":"10.23919\/ICIF.2018.8455344"},{"doi-asserted-by":"crossref","unstructured":"Shi, S., Wang, X., Li, H.: PointRCNN: 3D object proposal generation and detection from point cloud. In: CVPR (2019)","key":"29_CR34","DOI":"10.1109\/CVPR.2019.00086"},{"doi-asserted-by":"crossref","unstructured":"Shi, W., Rajkumar, R.: Point-GNN: graph neural network for 3D object detection in a point cloud. In: CVPR (2020)","key":"29_CR35","DOI":"10.1109\/CVPR42600.2020.00178"},{"doi-asserted-by":"crossref","unstructured":"Simonelli, A., Bulo, S.R., Porzi, L., L\u00f3pez-Antequera, M., Kontschieder, P.: Disentangling monocular 3D object detection. In: ICCV (2019)","key":"29_CR36","DOI":"10.1109\/ICCV.2019.00208"},{"key":"29_CR37","volume-title":"Radar Handbook","author":"MI Skolnik","year":"1990","unstructured":"Skolnik, M.I.: Radar Handbook, 2nd edn. McGrawHill, LOndon (1990)","edition":"2"},{"doi-asserted-by":"crossref","unstructured":"Sless, L., El Shlomo, B., Cohen, G., Oron, S.: Road scene understanding by occupancy grid learning from sparse radar clusters using semantic segmentation. In: Proceedings of the IEEE International Conference on Computer Vision Workshops (2019)","key":"29_CR38","DOI":"10.1109\/ICCVW.2019.00115"},{"issue":"6","key":"29_CR39","doi-asserted-by":"publisher","first-page":"1017","DOI":"10.1016\/j.automatica.2004.01.014","volume":"40","author":"SL Sun","year":"2004","unstructured":"Sun, S.L., Deng, Z.L.: Multi-sensor optimal information fusion Kalman filter. Automatica 40(6), 1017\u20131023 (2004)","journal-title":"Automatica"},{"doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: CVPR (2015)","key":"29_CR40","DOI":"10.1109\/CVPR.2015.7298594"},{"doi-asserted-by":"crossref","unstructured":"Vora, S., Lang, A.H., Helou, B., Beijbom, O.: PointPainting: sequential fusion for 3D object detection. In: CVPR (2020)","key":"29_CR41","DOI":"10.1109\/CVPR42600.2020.00466"},{"doi-asserted-by":"crossref","unstructured":"Weng, X., Kitani, K.: Monocular 3D object detection with pseudo-lidar point cloud. In: ICCVW (2019)","key":"29_CR42","DOI":"10.1109\/ICCVW.2019.00114"},{"doi-asserted-by":"crossref","unstructured":"W\u00f6hler, C., Schumann, O., Hahn, M., Dickmann, J.: Comparison of random forest and long short-term memory network performances in classification tasks using radar. In: Sensor Data Fusion: Trends, Solutions, Applications (SDF) (2017)","key":"29_CR43","DOI":"10.1109\/SDF.2017.8126350"},{"key":"29_CR44","doi-asserted-by":"publisher","first-page":"3337","DOI":"10.3390\/s18103337","volume":"18","author":"Y Yan","year":"2018","unstructured":"Yan, Y., Mao, Y., Li, B.: Second: sparsely embedded convolutional detection. Sensors 18, 3337 (2018)","journal-title":"Sensors"},{"doi-asserted-by":"crossref","unstructured":"Yang, B., Luo, W., Urtasun, R.: PIXOR: real-time 3D object detection from point clouds. In: CVPR (2018)","key":"29_CR45","DOI":"10.1109\/CVPR.2018.00798"},{"doi-asserted-by":"crossref","unstructured":"Yang, Z., Sun, Y., Liu, S., Jia, J.: 3DSSD: point-based 3D single stage object detector. In: CVPR (2020)","key":"29_CR46","DOI":"10.1109\/CVPR42600.2020.01105"},{"doi-asserted-by":"crossref","unstructured":"Zhou, Y., Tuzel, O.: VoxelNet: end-to-end learning for point cloud based 3D object detection. 
In: CVPR (2018)","key":"29_CR47","DOI":"10.1109\/CVPR.2018.00472"},{"unstructured":"Zhu, B., Jiang, Z., Zhou, X., Li, Z., Yu, G.: Class-balanced grouping and sampling for point cloud 3D object detection. arXiv preprint arXiv:1908.09492 (2019)","key":"29_CR48"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2020"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-58523-5_29","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T00:11:57Z","timestamp":1733184717000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-58523-5_29"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030585228","9783030585235"],"references-count":48,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-58523-5_29","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"4 December 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Glasgow","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2020.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OpenReview","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5025","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"1360","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"27% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic. From the ECCV Workshops 249 full papers, 18 short papers, and 21 further contributions were published out of a total of 467 submissions.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}