{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,4]],"date-time":"2024-09-04T11:40:39Z","timestamp":1725450039339},"reference-count":238,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Expert Systems with Applications"],"published-print":{"date-parts":[[2022,5]]},"DOI":"10.1016\/j.eswa.2021.116424","type":"journal-article","created":{"date-parts":[[2022,1,10]],"date-time":"2022-01-10T12:59:33Z","timestamp":1641819573000},"page":"116424","update-policy":"http:\/\/dx.doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":23,"special_numbering":"C","title":["3D Human Action Recognition: Through the eyes of researchers"],"prefix":"10.1016","volume":"193","author":[{"ORCID":"http:\/\/orcid.org\/0000-0003-2788-7644","authenticated-orcid":false,"given":"Arya","family":"Sarkar","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0003-3536-9847","authenticated-orcid":false,"given":"Avinandan","family":"Banerjee","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-9598-7981","authenticated-orcid":false,"given":"Pawan Kumar","family":"Singh","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0001-8813-4086","authenticated-orcid":false,"given":"Ram","family":"Sarkar","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"13","key":"10.1016\/j.eswa.2021.116424_b1","doi-asserted-by":"crossref","first-page":"2381","DOI":"10.1049\/iet-ipr.2019.0350","article-title":"Advances in human action recognition: an updated survey","volume":"13","author":"Abu-Bakar","year":"2019","journal-title":"IET Image Processing"},{"issue":"3","key":"10.1016\/j.eswa.2021.116424_b2","doi-asserted-by":"crossref","first-page":"1445","DOI":"10.1109\/JSEN.2019.2947446","article-title":"Human action recognition using deep multilevel multimodal M2 fusion of depth and inertial sensors","volume":"20","author":"Ahmad","year":"2019","journal-title":"IEEE Sensors Journal"},{"key":"10.1016\/j.eswa.2021.116424_b3","first-page":"1","article-title":"Improved coral reefs optimization with adaptive \u03b2-hill climbing for feature selection","author":"Ahmed","year":"2020","journal-title":"Neural Computing and 
Applications"},{"key":"10.1016\/j.eswa.2021.116424_b4","series-title":"2015 IEEE International conference on computer graphics, vision and information security","first-page":"94","article-title":"Action recognition for human robot interaction in industrial applications","author":"Akkaladevi","year":"2015"},{"issue":"6","key":"10.1016\/j.eswa.2021.116424_b5","doi-asserted-by":"crossref","first-page":"46","DOI":"10.3390\/jimaging6060046","article-title":"A review on computer vision-based methods for human action recognition","volume":"6","author":"Al-Faris","year":"2020","journal-title":"Journal of Imaging"},{"key":"10.1016\/j.eswa.2021.116424_b6","doi-asserted-by":"crossref","unstructured":"Ali,\u00a0S., & Bouguila,\u00a0N. (2019). Variational learning of beta-liouville hidden markov models for infrared action recognition. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition workshops.","DOI":"10.1109\/CVPRW.2019.00119"},{"key":"10.1016\/j.eswa.2021.116424_b7","doi-asserted-by":"crossref","first-page":"94","DOI":"10.1016\/j.cogsys.2019.05.002","article-title":"Human action recognition from RGB-D data using complete local binary pattern","volume":"58","author":"Arivazhagan","year":"2019","journal-title":"Cognitive Systems Research"},{"issue":"5","key":"10.1016\/j.eswa.2021.116424_b8","doi-asserted-by":"crossref","first-page":"5919","DOI":"10.1007\/s11042-018-6875-7","article-title":"Fusing depth and colour information for human action recognition","volume":"78","author":"Avola","year":"2019","journal-title":"Multimedia Tools and Applications"},{"key":"10.1016\/j.eswa.2021.116424_b9","article-title":"Fuzzy integral based CNN classifier fusion for 3D skeleton action recognition","author":"Banerjee","year":"2020","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.eswa.2021.116424_b10","doi-asserted-by":"crossref","unstructured":"Baradel,\u00a0F., Wolf,\u00a0C., Mille,\u00a0J., & Taylor,\u00a0G. W. (2018). Glimpse clouds: Human activity recognition from unstructured feature points. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 469\u2013478).","DOI":"10.1109\/CVPR.2018.00056"},{"issue":"5","key":"10.1016\/j.eswa.2021.116424_b11","doi-asserted-by":"crossref","first-page":"2070","DOI":"10.3390\/app11052070","article-title":"How to correctly detect face-masks for COVID-19 from visual information?","volume":"11","author":"Batagelj","year":"2021","journal-title":"Applied Sciences"},{"key":"10.1016\/j.eswa.2021.116424_b12","unstructured":"Ben-Musa,\u00a0A. S., Singh,\u00a0S. K., & Agrawal,\u00a0P. (2014). Suspicious Human Activity Recognition for Video Surveillance System. In Proc. of the int. conf. on control, instrumentation, comm. & comp. 
technologies."},{"key":"10.1016\/j.eswa.2021.116424_b13","series-title":"2012 IEEE computer society conference on computer vision and pattern recognition workshops","first-page":"7","article-title":"G3D: A gaming action dataset and real time action recognition evaluation framework","author":"Bloom","year":"2012"},{"issue":"1","key":"10.1016\/j.eswa.2021.116424_b14","doi-asserted-by":"crossref","first-page":"374","DOI":"10.3390\/app10010374","article-title":"A vision-based system for monitoring elderly people at home","volume":"10","author":"Buzzelli","year":"2020","journal-title":"Applied Sciences"},{"key":"10.1016\/j.eswa.2021.116424_b15","series-title":"2019 32nd SIBGRAPI conference on graphics, patterns and images","first-page":"16","article-title":"Skeleton image representation for 3d action recognition based on tree structure and reference joints","author":"Caetano","year":"2019"},{"key":"10.1016\/j.eswa.2021.116424_b16","doi-asserted-by":"crossref","DOI":"10.1109\/TCSVT.2018.2879913","article-title":"Skeleton-based action recognition with gated convolutional neural networks","author":"Cao","year":"2019","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.eswa.2021.116424_b17","doi-asserted-by":"crossref","unstructured":"Cao,\u00a0Z., Simon,\u00a0T., Wei,\u00a0S.-E., & Sheikh,\u00a0Y. (2017). Realtime multi-person 2d pose estimation using part affinity fields. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 7291\u20137299).","DOI":"10.1109\/CVPR.2017.143"},{"key":"10.1016\/j.eswa.2021.116424_b18","series-title":"International conference on image analysis and processing","first-page":"436","article-title":"Recognition of human actions from rgb-d videos using a reject option","author":"Carletti","year":"2013"},{"key":"10.1016\/j.eswa.2021.116424_b19","doi-asserted-by":"crossref","unstructured":"Carreira,\u00a0J., & Zisserman,\u00a0A. (2017). Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 
6299\u20136308).","DOI":"10.1109\/CVPR.2017.502"},{"issue":"1","key":"10.1016\/j.eswa.2021.116424_b20","doi-asserted-by":"crossref","first-page":"51","DOI":"10.1109\/THMS.2014.2362520","article-title":"Improving human action recognition using fusion of depth camera and inertial sensors","volume":"45","author":"Chen","year":"2014","journal-title":"IEEE Transactions on Human-Machine Systems"},{"issue":"3","key":"10.1016\/j.eswa.2021.116424_b21","doi-asserted-by":"crossref","first-page":"773","DOI":"10.1109\/JSEN.2015.2487358","article-title":"A real-time human action recognition system using depth and inertial sensor fusion","volume":"16","author":"Chen","year":"2015","journal-title":"IEEE Sensors Journal"},{"key":"10.1016\/j.eswa.2021.116424_b22","series-title":"2015 IEEE international conference on image processing","first-page":"168","article-title":"UTD-MHAD: A multimodal dataset for human action recognition utilizing a depth camera and a wearable inertial sensor","author":"Chen","year":"2015"},{"issue":"3","key":"10.1016\/j.eswa.2021.116424_b23","doi-asserted-by":"crossref","first-page":"4405","DOI":"10.1007\/s11042-015-3177-1","article-title":"A survey of depth and inertial sensor fusion for human action recognition","volume":"76","author":"Chen","year":"2017","journal-title":"Multimedia Tools and Applications"},{"issue":"4","key":"10.1016\/j.eswa.2021.116424_b24","doi-asserted-by":"crossref","first-page":"458","DOI":"10.26599\/TST.2019.9010018","article-title":"Survey of pedestrian action recognition techniques for autonomous driving","volume":"25","author":"Chen","year":"2020","journal-title":"Tsinghua Science and Technology"},{"key":"10.1016\/j.eswa.2021.116424_b25","series-title":"European conference on computer vision","first-page":"52","article-title":"Human daily action analysis with multi-view and color-depth data","author":"Cheng","year":"2012"},{"key":"10.1016\/j.eswa.2021.116424_b26","doi-asserted-by":"crossref","unstructured":"Cheng,\u00a0K., Zhang,\u00a0Y., He,\u00a0X., Chen,\u00a0W., Cheng,\u00a0J., & Lu,\u00a0H. (2020). Skeleton-Based Action Recognition With Shift Graph Convolutional Network. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 183\u2013192).","DOI":"10.1109\/CVPR42600.2020.00026"},{"key":"10.1016\/j.eswa.2021.116424_b27","series-title":"2013 IEEE international conference on computational intelligence and computing research","first-page":"1","article-title":"Automated attendance management system based on face recognition algorithms","author":"Chintalapati","year":"2013"},{"key":"10.1016\/j.eswa.2021.116424_b28","series-title":"International conference on virtual systems and multimedia","first-page":"112","article-title":"A view-based real-time human action recognition system as an interface for human computer interaction","author":"Choi","year":"2007"},{"key":"10.1016\/j.eswa.2021.116424_b29","series-title":"2011 IEEE international conference on fuzzy systems","first-page":"484","article-title":"Human action recognition via sum-rule fusion of fuzzy K-nearest neighbor classifiers","author":"Chua","year":"2011"},{"key":"10.1016\/j.eswa.2021.116424_b30","series-title":"European conference on computer vision","first-page":"695","article-title":"Movement pattern histogram for action recognition and retrieval","author":"Ciptadi","year":"2014"},{"key":"10.1016\/j.eswa.2021.116424_b31","series-title":"Counterterrorism, crime fighting, forensics, and surveillance technologies II, Vol. 
10802","first-page":"108020L","article-title":"Autonomous computational intelligence-based behaviour recognition in security and surveillance","author":"Clift","year":"2018"},{"key":"10.1016\/j.eswa.2021.116424_b32","doi-asserted-by":"crossref","unstructured":"Concha,\u00a0O. P., Xu,\u00a0R. Y. D., & Piccardi,\u00a0M. (2010). Robust Dimensionality Reduction for Human Action Recognition. In 2010 International conference on digital image computing: techniques and applications (pp. 349\u2013356).","DOI":"10.1109\/DICTA.2010.66"},{"key":"10.1016\/j.eswa.2021.116424_b33","article-title":"Sensor-based and vision-based human activity recognition: A comprehensive survey","volume":"108","author":"Dang","year":"2020","journal-title":"Pattern Recognition"},{"key":"10.1016\/j.eswa.2021.116424_b34","series-title":"2019 Joint 8th international conference on informatics, electronics vision (ICIEV) and 2019 3rd international conference on imaging, vision pattern recognition","first-page":"134","article-title":"Challenges in sensor-based human activity recognition and a comparative analysis of benchmark datasets: A review","author":"Das\u00a0Antar","year":"2019"},{"key":"10.1016\/j.eswa.2021.116424_b35","series-title":"International symposium on visual computing","first-page":"291","article-title":"Hierarchical action classification with network pruning","author":"Davoodikakhki","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b36","doi-asserted-by":"crossref","first-page":"168297","DOI":"10.1109\/ACCESS.2020.3023599","article-title":"Infrared and 3d skeleton feature fusion for rgb-d action recognition","volume":"8","author":"De Boissiere","year":"2020","journal-title":"IEEE Access"},{"key":"10.1016\/j.eswa.2021.116424_b37","doi-asserted-by":"crossref","unstructured":"Demisse,\u00a0G. G., Papadopoulos,\u00a0K., Aouada,\u00a0D., & Ottersten,\u00a0B. (2018). Pose encoding for robust skeleton-based action recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops (pp. 188\u2013194).","DOI":"10.1109\/CVPRW.2018.00056"},{"key":"10.1016\/j.eswa.2021.116424_b38","series-title":"2015 3rd IAPR asian conference on pattern recognition","first-page":"579","article-title":"Skeleton based action recognition with convolutional neural network","author":"Du","year":"2015"},{"issue":"7","key":"10.1016\/j.eswa.2021.116424_b39","doi-asserted-by":"crossref","first-page":"3010","DOI":"10.1109\/TIP.2016.2552404","article-title":"Representation learning of temporal dynamics for skeleton-based action recognition","volume":"25","author":"Du","year":"2016","journal-title":"IEEE Transactions on Image Processing"},{"key":"10.1016\/j.eswa.2021.116424_b40","unstructured":"Du,\u00a0Y., Wang,\u00a0W., & Wang,\u00a0L. (2015). Hierarchical recurrent neural network for skeleton based action recognition. In IEEE conference on computer vision and pattern recognition (pp. 1110\u20131118)."},{"key":"10.1016\/j.eswa.2021.116424_b41","series-title":"Revisiting skeleton-based action recognition","author":"Duan","year":"2021"},{"issue":"1","key":"10.1016\/j.eswa.2021.116424_b42","doi-asserted-by":"crossref","first-page":"177","DOI":"10.1109\/TRO.2013.2279412","article-title":"3-d mapping with an RGB-d camera","volume":"30","author":"Endres","year":"2013","journal-title":"IEEE Transactions on Robotics"},{"key":"10.1016\/j.eswa.2021.116424_b43","doi-asserted-by":"crossref","unstructured":"Feichtenhofer,\u00a0C., Pinz,\u00a0A., & Zisserman,\u00a0A. (2016). 
Convolutional two-stream network fusion for video action recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1933\u20131941).","DOI":"10.1109\/CVPR.2016.213"},{"key":"10.1016\/j.eswa.2021.116424_b44","series-title":"International conference image analysis and recognition","first-page":"373","article-title":"Human action recognition using fusion of depth and inertial sensors","author":"Fuad","year":"2018"},{"key":"10.1016\/j.eswa.2021.116424_b45","doi-asserted-by":"crossref","first-page":"36","DOI":"10.1016\/j.neucom.2016.05.094","article-title":"Infar dataset: Infrared action recognition at different times","volume":"212","author":"Gao","year":"2016","journal-title":"Neurocomputing"},{"key":"10.1016\/j.eswa.2021.116424_b46","doi-asserted-by":"crossref","unstructured":"Gao,\u00a0X., Hu,\u00a0W., Tang,\u00a0J., Liu,\u00a0J., & Guo,\u00a0Z. (2019). Optimized skeleton-based action recognition via sparsified graph regression. In Proceedings of the 27th ACM international conference on multimedia (pp. 601\u2013610).","DOI":"10.1145\/3343031.3351170"},{"key":"10.1016\/j.eswa.2021.116424_b47","doi-asserted-by":"crossref","first-page":"43110","DOI":"10.1109\/ACCESS.2019.2907071","article-title":"RGB-D-based object recognition using multimodal convolutional neural networks: A survey","volume":"7","author":"Gao","year":"2019","journal-title":"IEEE Access"},{"key":"10.1016\/j.eswa.2021.116424_b48","series-title":"2011 International conference on computer vision","first-page":"2595","article-title":"A \u201cstring of feature graphs\u201d model for recognition of complex activities in natural videos","author":"Gaur","year":"2011"},{"issue":"1","key":"10.1016\/j.eswa.2021.116424_b49","doi-asserted-by":"crossref","first-page":"88","DOI":"10.1109\/MCE.2017.2755498","article-title":"Kinect sensor gesture and activity recognition: New applications for consumer cognitive systems","volume":"7","author":"Gavrilova","year":"2017","journal-title":"IEEE Consumer Electronics Magazine"},{"key":"10.1016\/j.eswa.2021.116424_b50","series-title":"Motion-based recognition","first-page":"147","article-title":"Human activity recognition","author":"Goddard","year":"1997"},{"key":"10.1016\/j.eswa.2021.116424_b51","series-title":"2013 IEEE conference on computer vision and pattern recognition workshops","first-page":"676","article-title":"THETIS: Three dimensional tennis shots a human action dataset","author":"Gourgari","year":"2013"},{"key":"10.1016\/j.eswa.2021.116424_b52","doi-asserted-by":"crossref","DOI":"10.1155\/2017\/5843504","article-title":"A remote health monitoring system for the elderly based on smart home gateway","volume":"2017","author":"Guan","year":"2017","journal-title":"Journal of Healthcare Engineering"},{"key":"10.1016\/j.eswa.2021.116424_b53","doi-asserted-by":"crossref","first-page":"415","DOI":"10.3389\/fbioe.2020.00415","article-title":"Automatic pose recognition for monitoring dangerous situations in ambient-assisted living","volume":"8","author":"Guerra","year":"2020","journal-title":"Frontiers in Bioengineering and Biotechnology"},{"issue":"10","key":"10.1016\/j.eswa.2021.116424_b54","doi-asserted-by":"crossref","first-page":"5267","DOI":"10.1007\/s00521-020-05297-5","article-title":"CGA: A New feature selection model for visual human action recognition","volume":"33","author":"Guha","year":"2021","journal-title":"Neural Computing and Applications"},{"key":"10.1016\/j.eswa.2021.116424_b55","series-title":"2020 IEEE 23rd international conference on 
intelligent transportation systems","first-page":"1","article-title":"Recognition and 3D localization of pedestrian actions from monocular video","author":"Hayakawa","year":"2020"},{"issue":"1","key":"10.1016\/j.eswa.2021.116424_b56","doi-asserted-by":"crossref","first-page":"16","DOI":"10.1049\/iet-cvi.2017.0062","article-title":"Skeleton-based human activity recognition for elderly monitoring systems","volume":"12","author":"Hbali","year":"2018","journal-title":"IET Computer Vision"},{"issue":"07","key":"10.1016\/j.eswa.2021.116424_b57","first-page":"01","article-title":"Implementation of Covid-19 social distance detection and suspicious human behavior recognition using machine learning","volume":"4","author":"HN","year":"2021","journal-title":"IJO-International Journal of Electrical and Electronics Engineering"},{"key":"10.1016\/j.eswa.2021.116424_b58","series-title":"2017 4th International conference on advances in electrical engineering","first-page":"99","article-title":"Real time direction-sensitive fall detection system using accelerometer and learning classifier","author":"Hossain","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b59","doi-asserted-by":"crossref","unstructured":"Hu,\u00a0J.-F., Zheng,\u00a0W.-S., Lai,\u00a0J., & Zhang,\u00a0J. (2015). Jointly learning heterogeneous features for RGB-D activity recognition. In Proceedings Of The IEEE conference on computer vision and pattern recognition (pp. 5344\u20135352).","DOI":"10.1109\/CVPR.2015.7299172"},{"key":"10.1016\/j.eswa.2021.116424_b60","doi-asserted-by":"crossref","unstructured":"Hu,\u00a0J.-F., Zheng,\u00a0W.-S., Pan,\u00a0J., Lai,\u00a0J., & Zhang,\u00a0J. (2018). Deep bilinear learning for rgb-d action recognition. In Proceedings of the European conference on computer vision (pp. 335\u2013351).","DOI":"10.1007\/978-3-030-01234-2_21"},{"key":"10.1016\/j.eswa.2021.116424_b61","series-title":"Densely connected convolutional networks","author":"Huang","year":"2018"},{"key":"10.1016\/j.eswa.2021.116424_b62","doi-asserted-by":"crossref","unstructured":"Huang,\u00a0Z., & Van\u00a0Gool,\u00a0L. (2017). A riemannian network for spd matrix learning. In AAAI conference on artificial intelligence.","DOI":"10.1609\/aaai.v31i1.10866"},{"key":"10.1016\/j.eswa.2021.116424_b63","doi-asserted-by":"crossref","unstructured":"Huang,\u00a0Z., Wan,\u00a0C., Probst,\u00a0T., & Van Gool,\u00a0L. (2017). Deep learning on lie groups for skeleton-based action recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 6099\u20136108).","DOI":"10.1109\/CVPR.2017.137"},{"key":"10.1016\/j.eswa.2021.116424_b64","doi-asserted-by":"crossref","unstructured":"Huang,\u00a0Z., Wu,\u00a0J., & Van\u00a0Gool,\u00a0L. (2018). Building deep networks on Grassmann manifolds. 
In Thirty-second AAAI conference on artificial intelligence.","DOI":"10.1609\/aaai.v32i1.11725"},{"issue":"5","key":"10.1016\/j.eswa.2021.116424_b65","doi-asserted-by":"crossref","DOI":"10.1109\/TII.2019.2910876","article-title":"Encoding pose features to images with data augmentation for 3-D action recognition","volume":"16","author":"Huynh-The","year":"2019","journal-title":"IEEE Transactions on Industrial Informatics"},{"key":"10.1016\/j.eswa.2021.116424_b66","doi-asserted-by":"crossref","first-page":"112","DOI":"10.1016\/j.ins.2019.10.047","article-title":"Image representation of pose-transition feature for 3D skeleton-based action recognition","volume":"513","author":"Huynh-The","year":"2020","journal-title":"Information Sciences"},{"key":"10.1016\/j.eswa.2021.116424_b67","series-title":"ICASSP 2020-2020 IEEE international conference on acoustics, speech and signal processing","first-page":"2353","article-title":"Learning geometric features with dual\u2013stream CNN for 3D action recognition","author":"Huynh-The","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b68","series-title":"2017 IEEE International conference on multisensor fusion and integration for intelligent systems","first-page":"278","article-title":"Multi-modal human action recognition using deep neural networks fusing image and inertial sensor data","author":"Hwang","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b69","doi-asserted-by":"crossref","unstructured":"Ibrahim,\u00a0M. S., Muralidharan,\u00a0S., Deng,\u00a0Z., Vahdat,\u00a0A., & Mori,\u00a0G. (2016). A hierarchical deep temporal model for group activity recognition. In Proceedings Of The IEEE conference on computer vision and pattern recognition (pp. 1971\u20131980).","DOI":"10.1109\/CVPR.2016.217"},{"issue":"1","key":"10.1016\/j.eswa.2021.116424_b70","doi-asserted-by":"crossref","first-page":"189","DOI":"10.1007\/s12652-019-01239-9","article-title":"Evaluating fusion of RGB-D and inertial sensors for multimodal human action recognition","volume":"11","author":"Imran","year":"2020","journal-title":"Journal of Ambient Intelligence and Humanized Computing"},{"key":"10.1016\/j.eswa.2021.116424_b71","doi-asserted-by":"crossref","unstructured":"Iosifidis,\u00a0A., Tefas,\u00a0A., & Pitas,\u00a0I. (2013). Multi-view Human Action Recognition: A Survey. In 2013 Ninth international conference on intelligent information hiding and multimedia signal processing (pp. 
522\u2013525).","DOI":"10.1109\/IIH-MSP.2013.135"},{"key":"10.1016\/j.eswa.2021.116424_b72","series-title":"2020 IEEE\/RSJ international conference on intelligent robots and systems","first-page":"10285","article-title":"Hamlet: A hierarchical multimodal attention-based human activity recognition algorithm","author":"Islam","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b73","series-title":"Artificial intelligence evolution","first-page":"109","article-title":"Remarkable skeleton based human action recognition","author":"Jaiswal","year":"2020"},{"issue":"4","key":"10.1016\/j.eswa.2021.116424_b74","article-title":"A depth video-based human detection and activity recognition using multi-features and embedded hidden Markov models for health care monitoring systems","volume":"4","author":"Jalal","year":"2017","journal-title":"International Journal of Interactive Multimedia & Artificial Intelligence"},{"key":"10.1016\/j.eswa.2021.116424_b75","doi-asserted-by":"crossref","first-page":"295","DOI":"10.1016\/j.patcog.2016.08.003","article-title":"Robust human activity recognition from depth video using spatiotemporal multi-fused features","volume":"61","author":"Jalal","year":"2017","journal-title":"Pattern Recognition"},{"key":"10.1016\/j.eswa.2021.116424_b76","article-title":"Vision-based human action recognition: An overview and real world challenges","volume":"32","author":"Jegham","year":"2020","journal-title":"Forensic Science International: Digital Investigation"},{"key":"10.1016\/j.eswa.2021.116424_b77","series-title":"A large-scale varying-view RGB-D action dataset for arbitrary-view human action recognition","author":"Ji","year":"2019"},{"issue":"1","key":"10.1016\/j.eswa.2021.116424_b78","doi-asserted-by":"crossref","first-page":"221","DOI":"10.1109\/TPAMI.2012.59","article-title":"3D convolutional neural networks for human action recognition","volume":"35","author":"Ji","year":"2012","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.eswa.2021.116424_b79","doi-asserted-by":"crossref","unstructured":"Jiang,\u00a0Z., Rozgic,\u00a0V., & Adali,\u00a0S. (2017). Learning spatiotemporal features for infrared action recognition with 3d convolutional neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops (pp. 
115\u2013123).","DOI":"10.1109\/CVPRW.2017.44"},{"key":"10.1016\/j.eswa.2021.116424_b80","doi-asserted-by":"crossref","DOI":"10.1109\/TCSVT.2019.2914137","article-title":"Action recognition scheme based on skeleton representation with DS-LSTM network","author":"Jiang","year":"2020","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.eswa.2021.116424_b81","series-title":"2016 Sixth international conference on instrumentation measurement, computer, communication and control","first-page":"707","article-title":"Fall recognition approach based on human skeleton information","author":"Kai","year":"2016"},{"key":"10.1016\/j.eswa.2021.116424_b82","series-title":"2020 International conference on computer communication and informatics","first-page":"1","article-title":"A survey on deep learning techniques for human action recognition","author":"Karthickkumar","year":"2020"},{"issue":"2","key":"10.1016\/j.eswa.2021.116424_b83","doi-asserted-by":"crossref","first-page":"627","DOI":"10.3390\/s18020627","article-title":"Temporal and fine-grained pedestrian action recognition on driving recorder database","volume":"18","author":"Kataoka","year":"2018","journal-title":"Sensors"},{"key":"10.1016\/j.eswa.2021.116424_b84","doi-asserted-by":"crossref","unstructured":"Ke,\u00a0Q., Bennamoun,\u00a0M., An,\u00a0S., Sohel,\u00a0F., & Boussaid,\u00a0F. (2017). A new representation of skeleton sequences for 3d action recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (pp. 3288\u20133297).","DOI":"10.1109\/CVPR.2017.486"},{"issue":"6","key":"10.1016\/j.eswa.2021.116424_b85","doi-asserted-by":"crossref","first-page":"2842","DOI":"10.1109\/TIP.2018.2812099","article-title":"Learning clip representations for skeleton-based 3d action recognition","volume":"27","author":"Ke","year":"2018","journal-title":"IEEE Transactions on Image Processing"},{"key":"10.1016\/j.eswa.2021.116424_b86","doi-asserted-by":"crossref","first-page":"959","DOI":"10.1109\/TIP.2019.2937757","article-title":"Learning latent global network for skeleton-based action prediction","volume":"29","author":"Ke","year":"2019","journal-title":"IEEE Transactions on Image Processing"},{"key":"10.1016\/j.eswa.2021.116424_b87","doi-asserted-by":"crossref","first-page":"107","DOI":"10.1016\/j.patrec.2018.04.035","article-title":"Combining CNN streams of RGB-D and skeletal data for human activity recognition","volume":"115","author":"Khaire","year":"2018","journal-title":"Pattern Recognition Letters"},{"key":"10.1016\/j.eswa.2021.116424_b88","unstructured":"Kipf, T. (2016). Graph convolutional networks. 
https:\/\/tkipf.github.io\/graph-convolutional-networks\/."},{"issue":"8","key":"10.1016\/j.eswa.2021.116424_b89","doi-asserted-by":"crossref","first-page":"951","DOI":"10.1177\/0278364913478446","article-title":"Learning human activities and object affordances from rgb-d videos","volume":"32","author":"Koppula","year":"2013","journal-title":"The International Journal Of Robotics Research"},{"issue":"2","key":"10.1016\/j.eswa.2021.116424_b90","first-page":"111","article-title":"Data preprocessing for supervised leaning","volume":"1","author":"Kotsiantis","year":"2006","journal-title":"International Journal of Computer Science"},{"issue":"8","key":"10.1016\/j.eswa.2021.116424_b91","doi-asserted-by":"crossref","first-page":"6","DOI":"10.1167\/6.8.6","article-title":"Visual perception of biological motion by form: A template-matching analysis","volume":"6","author":"Lange","year":"2006","journal-title":"Journal of Vision"},{"issue":"10","key":"10.1016\/j.eswa.2021.116424_b92","doi-asserted-by":"crossref","first-page":"2886","DOI":"10.3390\/s20102886","article-title":"Real-time human action recognition with a low-cost RGB camera and mobile robot platform","volume":"20","author":"Lee","year":"2020","journal-title":"Sensors"},{"issue":"10","key":"10.1016\/j.eswa.2021.116424_b93","doi-asserted-by":"crossref","first-page":"2886","DOI":"10.3390\/s20102886","article-title":"Real-time human action recognition with a low-cost RGB camera and mobile robot platform","volume":"20","author":"Lee","year":"2020","journal-title":"Sensors"},{"key":"10.1016\/j.eswa.2021.116424_b94","series-title":"2009 IEEE international conference on multimedia and expo","first-page":"614","article-title":"Real-time pedestrian and vehicle detection in video using 3D cues","author":"Lee","year":"2009"},{"key":"10.1016\/j.eswa.2021.116424_b95","article-title":"Symbiotic graph neural networks for 3d skeleton-based human action recognition and motion prediction","author":"Li","year":"2021","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.eswa.2021.116424_b96","series-title":"2020 IEEE international conference on multimedia expo workshops","first-page":"1","article-title":"Efficient fitness action analysis based on spatio-temporal feature encoding","author":"Li","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b97","doi-asserted-by":"crossref","unstructured":"Li,\u00a0C., Cui,\u00a0Z., Zheng,\u00a0W., Xu,\u00a0C., & Yang,\u00a0J. (2018). Spatio-temporal graph convolution for skeleton based action recognition. In Thirty-second AAAI conference on artificial intelligence.","DOI":"10.1609\/aaai.v32i1.11776"},{"issue":"5","key":"10.1016\/j.eswa.2021.116424_b98","doi-asserted-by":"crossref","first-page":"624","DOI":"10.1109\/LSP.2017.2678539","article-title":"Joint distance maps based action recognition with convolutional neural networks","volume":"24","author":"Li","year":"2017","journal-title":"IEEE Signal Processing Letters"},{"key":"10.1016\/j.eswa.2021.116424_b99","series-title":"2019 IEEE visual communications and image processing","first-page":"1","article-title":"3D human skeleton data compression for action recognition","author":"Li","year":"2019"},{"key":"10.1016\/j.eswa.2021.116424_b100","doi-asserted-by":"crossref","unstructured":"Li,\u00a0S., Li,\u00a0W., Cook,\u00a0C., Zhu,\u00a0C., & Gao,\u00a0Y. (2018). Independently recurrent neural network (indrnn): Building a longer and deeper rnn. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 
5457\u20135466).","DOI":"10.1109\/CVPR.2018.00572"},{"key":"10.1016\/j.eswa.2021.116424_b101","series-title":"European conference on computer vision","first-page":"420","article-title":"Hard-net: Hardness-aware discrimination network for 3d early activity prediction","author":"Li","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b102","doi-asserted-by":"crossref","unstructured":"Li,\u00a0T., Liu,\u00a0J., Zhang,\u00a0W., Ni,\u00a0Y., Wang,\u00a0W., & Li,\u00a0Z. (2021). UAV-Human: A Large Benchmark for Human Behavior Understanding with Unmanned Aerial Vehicles. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 16266\u201316275).","DOI":"10.1109\/CVPR46437.2021.01600"},{"key":"10.1016\/j.eswa.2021.116424_b103","series-title":"2017 IEEE international conference on multimedia & expo workshops","first-page":"585","article-title":"Skeleton-based action recognition using LSTM and CNN","author":"Li","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b104","article-title":"Memory attention networks for skeleton-based action recognition","author":"Li","year":"2021","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"issue":"1","key":"10.1016\/j.eswa.2021.116424_b105","doi-asserted-by":"crossref","first-page":"95","DOI":"10.1007\/s10846-019-01049-3","article-title":"Deep-learning-based human intention prediction using RGB images and optical flow","volume":"97","author":"Li","year":"2020","journal-title":"Journal of Intelligent and Robotic Systems"},{"key":"10.1016\/j.eswa.2021.116424_b106","series-title":"2010 IEEE computer society conference on computer vision and pattern recognition-workshops","first-page":"9","article-title":"Action recognition based on a bag of 3d points","author":"Li","year":"2010"},{"key":"10.1016\/j.eswa.2021.116424_b107","series-title":"2017 IEEE international conference on multimedia & expo workshops","first-page":"597","article-title":"Skeleton-based action recognition with convolutional neural networks","author":"Li","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b108","series-title":"Co-occurrence feature learning from skeleton data for action recognition and detection with hierarchical aggregation","author":"Li","year":"2018"},{"key":"10.1016\/j.eswa.2021.116424_b109","series-title":"2017 Asia-pacific signal and information processing association annual summit and conference","first-page":"386","article-title":"Automated classroom monitoring with connected visioning system","author":"Lim","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b110","series-title":"Proceedings Of The 20th ACM international conference on multimedia","first-page":"1053","article-title":"Human action recognition and retrieval using sole depth information","author":"Lin","year":"2012"},{"key":"10.1016\/j.eswa.2021.116424_b111","series-title":"2017 IEEE international conference on image processing","first-page":"4547","article-title":"Human-human interaction recognition based on spatial and motion trend feature","author":"Liu","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b112","series-title":"2017 IEEE international conference on multimedia & expo workshops","first-page":"623","article-title":"3D action recognition using multi-temporal skeleton visualization","author":"Liu","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b113","doi-asserted-by":"crossref","unstructured":"Liu,\u00a0W., Liao,\u00a0S., Ren,\u00a0W., Hu,\u00a0W., & Yu,\u00a0Y. (2019). 
High-level Semantic Feature Detection: A New Perspective for Pedestrian Detection. In IEEE conference on computer vision and pattern recognition.","DOI":"10.1109\/CVPR.2019.00533"},{"key":"10.1016\/j.eswa.2021.116424_b114","series-title":"2016 IEEE international conference on healthcare informatics","first-page":"24","article-title":"Human daily activity recognition for healthcare using wearable and visual sensing data","author":"Liu","year":"2016"},{"key":"10.1016\/j.eswa.2021.116424_b115","doi-asserted-by":"crossref","DOI":"10.1155\/2018\/1696924","article-title":"Smart wearables in healthcare: Signal processing, device development, and clinical applications","volume":"2018","author":"Liu","year":"2018","journal-title":"Journal of Healthcare Engineering"},{"issue":"6","key":"10.1016\/j.eswa.2021.116424_b116","doi-asserted-by":"crossref","first-page":"848","DOI":"10.1109\/LSP.2018.2823910","article-title":"Global temporal representation based CNNs for infrared action recognition","volume":"25","author":"Liu","year":"2018","journal-title":"IEEE Signal Processing Letters"},{"key":"10.1016\/j.eswa.2021.116424_b117","article-title":"Transferable feature representation for visible-to-infrared cross-dataset human action recognition","volume":"2018","author":"Liu","year":"2018","journal-title":"Complexity"},{"key":"10.1016\/j.eswa.2021.116424_b118","article-title":"Ntu rgb+ d 120: A large-scale benchmark for 3d human activity understanding","author":"Liu","year":"2019","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"issue":"12","key":"10.1016\/j.eswa.2021.116424_b119","doi-asserted-by":"crossref","first-page":"3007","DOI":"10.1109\/TPAMI.2017.2771306","article-title":"Skeleton-based action recognition using spatio-temporal lstm network with trust gates","volume":"40","author":"Liu","year":"2017","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.eswa.2021.116424_b120","series-title":"European conference on computer vision","first-page":"816","article-title":"Spatio-temporal lstm with trust gates for 3d human action recognition","author":"Liu","year":"2016"},{"issue":"18","key":"10.1016\/j.eswa.2021.116424_b121","doi-asserted-by":"crossref","first-page":"7570","DOI":"10.1109\/JSEN.2018.2859268","article-title":"Human action recognition using a distributed RGB-depth camera network","volume":"18","author":"Liu","year":"2018","journal-title":"IEEE Sensors Journal"},{"key":"10.1016\/j.eswa.2021.116424_b122","doi-asserted-by":"crossref","unstructured":"Liu,\u00a0J., Wang,\u00a0G., Hu,\u00a0P., Duan,\u00a0L., & Kot,\u00a0A. C. (2017). Global Context-Aware Attention LSTM Networks for 3D Action Recognition. In 2017 IEEE conference on computer vision and pattern recognition.","DOI":"10.1109\/CVPR.2017.391"},{"key":"10.1016\/j.eswa.2021.116424_b123","doi-asserted-by":"crossref","unstructured":"Liu,\u00a0M., & Yuan,\u00a0J. (2018). Recognizing human actions as the evolution of pose estimation maps. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 
1159\u20131168).","DOI":"10.1109\/CVPR.2018.00127"},{"key":"10.1016\/j.eswa.2021.116424_b124","doi-asserted-by":"crossref","DOI":"10.1016\/j.measurement.2020.108288","article-title":"A hybrid deep transfer learning model with machine learning methods for face mask detection in the era of the COVID-19 pandemic","volume":"167","author":"Loey","year":"2021","journal-title":"Measurement"},{"key":"10.1016\/j.eswa.2021.116424_b125","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1109\/TPAMI.2020.2976014","article-title":"Multi-task deep learning for real-time 3D human pose estimation and action recognition","author":"Luvizon","year":"2020","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.eswa.2021.116424_b126","doi-asserted-by":"crossref","unstructured":"Lv,\u00a0F., & Nevatia,\u00a0R. (2007). Single View Human Action Recognition using Key Pose Matching and Viterbi Path Searching. In 2007 IEEE conference on computer vision and pattern recognition (pp. 1\u20138).","DOI":"10.1109\/CVPR.2007.383131"},{"issue":"10","key":"10.1016\/j.eswa.2021.116424_b127","doi-asserted-by":"crossref","first-page":"2492","DOI":"10.1109\/TMM.2019.2904880","article-title":"Yoganet: 3-D yoga asana recognition using joint angular displacement maps with ConvNets","volume":"21","author":"Maddala","year":"2019","journal-title":"IEEE Transactions On Multimedia"},{"key":"10.1016\/j.eswa.2021.116424_b128","article-title":"Vision and inertial sensing fusion for human action recognition: A review","author":"Majumder","year":"2020","journal-title":"IEEE Sensors Journal"},{"issue":"2","key":"10.1016\/j.eswa.2021.116424_b129","doi-asserted-by":"crossref","first-page":"1132","DOI":"10.1109\/LRA.2019.2894389","article-title":"Activity recognition for ergonomics assessment of industrial tasks with automatic feature selection","volume":"4","author":"Malais\u00e9","year":"2019","journal-title":"IEEE Robotics and Automation Letters"},{"key":"10.1016\/j.eswa.2021.116424_b130","doi-asserted-by":"crossref","unstructured":"Martin,\u00a0M., Roitberg,\u00a0A., Haurilet,\u00a0M., Horne,\u00a0M., Reiss,\u00a0S., Voit,\u00a0M., & Stiefelhagen,\u00a0R. (2019). Drive&Act: A Multi-Modal Dataset for Fine-Grained Driver Behavior Recognition in Autonomous Vehicles. In Proceedings of the IEEE\/CVF international conference on computer vision.","DOI":"10.1109\/ICCV.2019.00289"},{"key":"10.1016\/j.eswa.2021.116424_b131","series-title":"2015 14th IAPR international conference on machine vision applications","first-page":"329","article-title":"Action recognition in bed using BAMs for assisted living and elderly care","author":"Martinez","year":"2015"},{"key":"10.1016\/j.eswa.2021.116424_b132","series-title":"2019 16th Conference on computer and robot vision","first-page":"49","article-title":"STAR-net: action recognition using spatio-temporal activation reprojection","author":"McNally","year":"2019"},{"key":"10.1016\/j.eswa.2021.116424_b133","series-title":"Gimme signals: Discriminative signal encoding for multimodal activity recognition","author":"Memmesheimer","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b134","doi-asserted-by":"crossref","unstructured":"Ming,\u00a0Y., Ruan,\u00a0Q., & Hauptmann,\u00a0A. G. (2012). Activity Recognition from RGB-D Camera with 3D Local Spatio-temporal Features. In 2012 IEEE international conference on multimedia and expo (pp. 
344\u2013349).","DOI":"10.1109\/ICME.2012.8"},{"issue":"10","key":"10.1016\/j.eswa.2021.116424_b135","doi-asserted-by":"crossref","first-page":"11461","DOI":"10.1109\/JSEN.2020.3015726","article-title":"A new framework for smartphone sensor-based human activity recognition using graph neural network","volume":"21","author":"Mondal","year":"2020","journal-title":"IEEE Sensors Journal"},{"key":"10.1016\/j.eswa.2021.116424_b136","series-title":"Handbook of computational intelligence in biomedical engineering and healthcare","first-page":"343","article-title":"A study on smartphone sensor-based human activity recognition using deep learning approaches","author":"Mondal","year":"2021"},{"key":"10.1016\/j.eswa.2021.116424_b137","doi-asserted-by":"crossref","unstructured":"Mora,\u00a0S. V., & Knottenbelt,\u00a0W. J. (2017). Deep Learning for Domain-Specific Action Recognition in Tennis. In 2017 IEEE conference on computer vision and pattern recognition workshops (pp. 170\u2013178).","DOI":"10.1109\/CVPRW.2017.27"},{"issue":"4","key":"10.1016\/j.eswa.2021.116424_b138","doi-asserted-by":"crossref","first-page":"921","DOI":"10.1016\/j.jestch.2019.10.008","article-title":"Action recognition in freestyle wrestling using silhouette-skeleton features","volume":"23","author":"Mottaghi","year":"2020","journal-title":"Engineering Science and Technology, An International Journal"},{"issue":"41","key":"10.1016\/j.eswa.2021.116424_b139","doi-asserted-by":"crossref","first-page":"31663","DOI":"10.1007\/s11042-020-09537-7","article-title":"EnsemConvNet: a deep learning approach for human activity recognition using smartphone sensors for healthcare applications","volume":"79","author":"Mukherjee","year":"2020","journal-title":"Multimedia Tools and Applications"},{"key":"10.1016\/j.eswa.2021.116424_b140","series-title":"Documentation mocap database hdm05","author":"M\u00fcller","year":"2007"},{"key":"10.1016\/j.eswa.2021.116424_b141","series-title":"Person re-identification","first-page":"161","article-title":"One-shot person re-identification with a consumer depth camera","author":"Munaro","year":"2014"},{"key":"10.1016\/j.eswa.2021.116424_b142","unstructured":"Narkhede, A. H. (2019). Human Activity Recognition Based on Multimodal Body Sensing."},{"key":"10.1016\/j.eswa.2021.116424_b143","series-title":"2011 IEEE International conference on computer vision workshops","first-page":"1147","article-title":"Rgbd-hudaact: A color-depth video database for human daily activity recognition","author":"Ni","year":"2011"},{"key":"10.1016\/j.eswa.2021.116424_b144","series-title":"2014 22nd international conference on pattern recognition","first-page":"1946","article-title":"Capturing global and local dynamics for human action recognition","author":"Nie","year":"2014"},{"key":"10.1016\/j.eswa.2021.116424_b145","doi-asserted-by":"crossref","first-page":"14","DOI":"10.1016\/j.cviu.2014.12.005","article-title":"A generative restricted Boltzmann machine based method for high-dimensional motion data modeling","volume":"136","author":"Nie","year":"2015","journal-title":"Computer Vision and Image Understanding"},{"key":"10.1016\/j.eswa.2021.116424_b146","series-title":"2004 IEEE international conference on multimedia and expo. Vol. 
1","first-page":"719","article-title":"Human activity detection and recognition for video surveillance","author":"Niu","year":"2004"},{"key":"10.1016\/j.eswa.2021.116424_b147","series-title":"2013 IEEE workshop on applications of computer vision","first-page":"53","article-title":"Berkeley mhad: A comprehensive multimodal human action database","author":"Ofli","year":"2013"},{"key":"10.1016\/j.eswa.2021.116424_b148","series-title":"Proceedings of the 2018 10th international conference on machine learning and computing","first-page":"41","article-title":"Human activity recognition for healthcare using smartphones","author":"Ogbuabor","year":"2018"},{"key":"10.1016\/j.eswa.2021.116424_b149","doi-asserted-by":"crossref","first-page":"68022","DOI":"10.1109\/ACCESS.2019.2917125","volume":"7","author":"Oguntala","year":"2019","journal-title":"IEEE Access"},{"key":"10.1016\/j.eswa.2021.116424_b150","doi-asserted-by":"crossref","unstructured":"Oreifej,\u00a0O., & Liu,\u00a0Z. (2013). Hon4d: Histogram of oriented 4d normals for activity recognition from depth sequences. In Proceedings Of The IEEE conference on computer vision and pattern recognition (pp. 716\u2013723).","DOI":"10.1109\/CVPR.2013.98"},{"key":"10.1016\/j.eswa.2021.116424_b151","doi-asserted-by":"crossref","unstructured":"Pande,\u00a0S., Banerjee,\u00a0A., Kumar,\u00a0S., Banerjee,\u00a0B., & Chaudhuri,\u00a0S. (2019). An adversarial approach to discriminative modality distillation for remote sensing image classification. In Proceedings of the IEEE\/CVF international conference on computer vision workshops.","DOI":"10.1109\/ICCVW.2019.00558"},{"key":"10.1016\/j.eswa.2021.116424_b152","series-title":"International conference on multimedia modeling","first-page":"473","article-title":"Real-time skeleton-tracking-based human action recognition using kinect data","author":"Papadopoulos","year":"2014"},{"key":"10.1016\/j.eswa.2021.116424_b153","series-title":"2018 Tenth international conference on ubiquitous and future networks","first-page":"868","article-title":"Video surveillance system based on 3D action recognition","author":"Park","year":"2018"},{"key":"10.1016\/j.eswa.2021.116424_b154","doi-asserted-by":"crossref","unstructured":"Peng,\u00a0W., Hong,\u00a0X., Chen,\u00a0H., & Zhao,\u00a0G. (2020). Learning Graph Convolutional Network for Skeleton-Based Human Action Recognition by Neural Searching. In AAAI (pp. 2669\u20132676).","DOI":"10.1609\/aaai.v34i03.5652"},{"key":"10.1016\/j.eswa.2021.116424_b155","doi-asserted-by":"crossref","unstructured":"Polla,\u00a0F., Laurent,\u00a0H., & Emile,\u00a0B. (2020). A Hierarchical Approach for Indoor Action Recognition from New Infrared Sensor Preserving Anonymity. In VISIGRAPP (4: VISAPP) (pp. 
229\u2013236).","DOI":"10.5220\/0008942002290236"},{"key":"10.1016\/j.eswa.2021.116424_b156","series-title":"Action recognition from variable viewpoints: Towards a safer living environment for elderly","author":"Priester","year":"2016"},{"key":"10.1016\/j.eswa.2021.116424_b157","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2020.04.034","article-title":"DTMMN: Deep transfer multi-metric network for RGB-D action recognition","author":"Qin","year":"2020","journal-title":"Neurocomputing"},{"key":"10.1016\/j.eswa.2021.116424_b158","doi-asserted-by":"crossref","first-page":"80","DOI":"10.1016\/j.inffus.2019.06.014","article-title":"Imaging and fusing time series for wearable sensor-based human activity recognition","volume":"53","author":"Qin","year":"2020","journal-title":"Information Fusion"},{"key":"10.1016\/j.eswa.2021.116424_b159","series-title":"International conference of pioneering computer scientists, engineers and educators","first-page":"3","article-title":"Survey on deep learning for human action recognition","author":"Qiu","year":"2019"},{"key":"10.1016\/j.eswa.2021.116424_b160","doi-asserted-by":"crossref","unstructured":"Rahmani,\u00a0H., & Bennamoun,\u00a0M. (2017). Learning action recognition model from depth and skeleton videos. In Proceedings of the IEEE international conference on computer vision (pp. 5832\u20135841).","DOI":"10.1109\/ICCV.2017.621"},{"key":"10.1016\/j.eswa.2021.116424_b161","series-title":"European conference on computer vision","first-page":"742","article-title":"HOPC: Histogram of oriented principal components of 3D pointclouds for action recognition","author":"Rahmani","year":"2014"},{"issue":"12","key":"10.1016\/j.eswa.2021.116424_b162","doi-asserted-by":"crossref","first-page":"2430","DOI":"10.1109\/TPAMI.2016.2533389","article-title":"Histogram of oriented principal components for cross-view action recognition","volume":"38","author":"Rahmani","year":"2016","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.eswa.2021.116424_b163","series-title":"Study on machine learning and deep learning methods for human action recognition","author":"Rajendran","year":"2020"},{"issue":"8","key":"10.1016\/j.eswa.2021.116424_b164","doi-asserted-by":"crossref","DOI":"10.1177\/1550147716665520","article-title":"A review on applications of activity recognition systems with regard to performance and evaluation","volume":"12","author":"Ranasinghe","year":"2016","journal-title":"International Journal of Distributed Sensor Networks"},{"issue":"3","key":"10.1016\/j.eswa.2021.116424_b165","doi-asserted-by":"crossref","first-page":"768","DOI":"10.3390\/s21030768","article-title":"Activity recognition for ambient assisted living with videos, inertial units and ambient sensors","volume":"21","author":"Ranieri","year":"2021","journal-title":"Sensors"},{"key":"10.1016\/j.eswa.2021.116424_b166","doi-asserted-by":"crossref","first-page":"90","DOI":"10.1016\/j.ins.2021.04.023","article-title":"Augmented skeleton based contrastive action learning with momentum lstm for unsupervised action recognition","volume":"569","author":"Rao","year":"2021","journal-title":"Information Sciences"},{"key":"10.1016\/j.eswa.2021.116424_b167","series-title":"ICDSMLA 2019","first-page":"504","article-title":"Smart surveillance and real-time human action recognition using OpenPose","author":"Rathod","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b168","series-title":"A survey on 3D skeleton-based action recognition using learning 
method","author":"Ren","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b169","series-title":"Proceedings of fifth IEEE international conference on automatic face gesture recognition","first-page":"417","article-title":"Human action recognition in smart classroom","author":"Ren","year":"2002"},{"key":"10.1016\/j.eswa.2021.116424_b170","doi-asserted-by":"crossref","unstructured":"Rey,\u00a0V. F., Hevesi,\u00a0P., Kovalenko,\u00a0O., & Lukowicz,\u00a0P. (2019). Let there be IMU data: generating training data for wearable, motion sensor based activity recognition from monocular RGB videos. In Adjunct proceedings of the 2019 ACM international joint conference on pervasive and ubiquitous computing and proceedings of the 2019 ACM international symposium on wearable computers (pp. 699\u2013708).","DOI":"10.1145\/3341162.3345590"},{"key":"10.1016\/j.eswa.2021.116424_b171","series-title":"Rising life expectancy: a global history","author":"Riley","year":"2001"},{"key":"10.1016\/j.eswa.2021.116424_b172","series-title":"2016 IEEE international conference on acoustics, speech and signal processing","first-page":"2702","article-title":"Multimodal human action recognition in assistive human-robot interaction","author":"Rodomagoulakis","year":"2016"},{"key":"10.1016\/j.eswa.2021.116424_b173","doi-asserted-by":"crossref","first-page":"13129","DOI":"10.1109\/ACCESS.2017.2789329","article-title":"Enabling technologies for the internet of health things","volume":"6","author":"Rodrigues","year":"2018","journal-title":"IEEE Access"},{"key":"10.1016\/j.eswa.2021.116424_b174","series-title":"2009 Chinese conference on pattern recognition","first-page":"1","article-title":"View-independent human action recognition based on a stereo camera","author":"Roh","year":"2009"},{"key":"10.1016\/j.eswa.2021.116424_b175","series-title":"Recognition of human action using moment-based","author":"Rosales","year":"1998"},{"key":"10.1016\/j.eswa.2021.116424_b176","series-title":"The 2012 international joint conference on neural networks","first-page":"1","article-title":"Clustering action data based on amount of exercise for use-model based health care support","author":"sato-Shimokawara","year":"2012"},{"key":"10.1016\/j.eswa.2021.116424_b177","series-title":"SPIN: A high speed, high resolution vision dataset for tracking and action recognition in ping pong","author":"Schwarcz","year":"2019"},{"key":"10.1016\/j.eswa.2021.116424_b178","doi-asserted-by":"crossref","unstructured":"Shahroudy,\u00a0A., Liu,\u00a0J., Ng,\u00a0T.-T., & Wang,\u00a0G. (2016). Ntu rgb+ d: A large scale dataset for 3d human activity analysis. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1010\u20131019).","DOI":"10.1109\/CVPR.2016.115"},{"key":"10.1016\/j.eswa.2021.116424_b179","series-title":"2014 IEEE international workshop on advanced robotics and its social impacts","first-page":"69","article-title":"3D human action segmentation and recognition using pose kinetic energy","author":"Shan","year":"2014"},{"key":"10.1016\/j.eswa.2021.116424_b180","series-title":"ICDSMLA 2019","first-page":"379","article-title":"Abnormal human behavior detection in video using suspicious object detection","author":"Sharma","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b181","doi-asserted-by":"crossref","unstructured":"Shi,\u00a0L., Zhang,\u00a0Y., Cheng,\u00a0J., & Lu,\u00a0H. (2019). Two-stream adaptive graph convolutional networks for skeleton-based action recognition. 
In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 12026\u201312035).","DOI":"10.1109\/CVPR.2019.01230"},{"key":"10.1016\/j.eswa.2021.116424_b182","doi-asserted-by":"crossref","unstructured":"Si,\u00a0C., Chen,\u00a0W., Wang,\u00a0W., Wang,\u00a0L., & Tan,\u00a0T. (2019). An attention enhanced graph convolutional lstm network for skeleton-based action recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1227\u20131236).","DOI":"10.1109\/CVPR.2019.00132"},{"key":"10.1016\/j.eswa.2021.116424_b183","series-title":"Advances in neural information processing systems","first-page":"568","article-title":"Two-stream convolutional networks for action recognition in videos","author":"Simonyan","year":"2014"},{"key":"10.1016\/j.eswa.2021.116424_b184","series-title":"International cross-domain conference for machine learning and knowledge extraction","first-page":"267","article-title":"Human activity recognition using recurrent neural networks","author":"Singh","year":"2017"},{"issue":"2","key":"10.1016\/j.eswa.2021.116424_b185","doi-asserted-by":"crossref","first-page":"1107","DOI":"10.1007\/s10462-018-9651-1","article-title":"Video benchmarks of human action datasets: a review","volume":"52","author":"Singh","year":"2019","journal-title":"Artificial Intelligence Review"},{"key":"10.1016\/j.eswa.2021.116424_b186","doi-asserted-by":"crossref","unstructured":"Song,\u00a0S., Lan,\u00a0C., Xing,\u00a0J., Zeng,\u00a0W., & Liu,\u00a0J. (2017). An end-to-end spatio-temporal attention model for human action recognition from skeleton data. In Thirty-first AAAI conference on artificial intelligence.","DOI":"10.1609\/aaai.v31i1.11212"},{"key":"10.1016\/j.eswa.2021.116424_b187","article-title":"Richly activated graph convolutional network for robust skeleton-based action recognition","author":"Song","year":"2020","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.eswa.2021.116424_b188","series-title":"Constructing stronger and faster baselines for skeleton-based action recognition","author":"Song","year":"2021"},{"key":"10.1016\/j.eswa.2021.116424_b189","series-title":"Computer vision in sports","first-page":"181","article-title":"Action recognition in realistic sports videos","author":"Soomro","year":"2014"},{"key":"10.1016\/j.eswa.2021.116424_b190","series-title":"Human action recognition from various data modalities: A review","author":"Sun","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b191","unstructured":"Sung,\u00a0J., Ponce,\u00a0C., Selman,\u00a0B., & Saxena,\u00a0A. (2011). Human activity detection from RGBD images. In Proceedings Of The 16th AAAI conference on plan, activity, and intent recognition (pp. 47\u201355)."},{"key":"10.1016\/j.eswa.2021.116424_b192","series-title":"2016 IEEE international conference on multimedia and expo","first-page":"1","article-title":"Human action recognition-based video summarization for RGB-D personal sports video","author":"Tejero-de-Pablos","year":"2016"},{"key":"10.1016\/j.eswa.2021.116424_b193","unstructured":"Thakkar,\u00a0K. C., & Narayanan,\u00a0P. J. (2018). Part-based Graph Convolutional Network for Action Recognition. In British machine vision conference 2018 (p. 
270)."},{"issue":"2","key":"10.1016\/j.eswa.2021.116424_b194","doi-asserted-by":"crossref","first-page":"283","DOI":"10.1007\/s10462-017-9545-7","article-title":"Suspicious human activity recognition: a review","volume":"50","author":"Tripathi","year":"2018","journal-title":"Artificial Intelligence Review"},{"key":"10.1016\/j.eswa.2021.116424_b195","series-title":"2018 Colour and visual computing symposium","first-page":"1","article-title":"A deep learning-based human activity recognition in darkness","author":"Uddin","year":"2018"},{"key":"10.1016\/j.eswa.2021.116424_b196","doi-asserted-by":"crossref","first-page":"386","DOI":"10.1016\/j.future.2019.01.029","article-title":"Action recognition using optimized deep autoencoder and CNN for surveillance data streams of non-stationary environments","volume":"96","author":"Ullah","year":"2019","journal-title":"Future Generation Computer Systems"},{"key":"10.1016\/j.eswa.2021.116424_b197","doi-asserted-by":"crossref","unstructured":"Vemulapalli,\u00a0R., & Chellapa,\u00a0R. (2016). Rolling rotations for recognizing human actions from 3d skeletal data. In Proceedings of IEEE conference on computer vision and pattern recognition (pp. 4471\u20134479).","DOI":"10.1109\/CVPR.2016.484"},{"key":"10.1016\/j.eswa.2021.116424_b198","doi-asserted-by":"crossref","DOI":"10.1007\/s00530-020-00677-2","article-title":"Deep learning-based multi-modal approach using RGB and skeleton sequences for human activity recognition","author":"Verma","year":"2020","journal-title":"Multimedia Systems"},{"key":"10.1016\/j.eswa.2021.116424_b199","doi-asserted-by":"crossref","unstructured":"Vinyes\u00a0Mora,\u00a0S., & Knottenbelt,\u00a0W. J. (2017). Deep learning for domain-specific action recognition in tennis. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops (pp. 114\u2013122).","DOI":"10.1109\/CVPRW.2017.27"},{"key":"10.1016\/j.eswa.2021.116424_b200","doi-asserted-by":"crossref","unstructured":"Wan,\u00a0B., Zhou,\u00a0D., Liu,\u00a0Y., Li,\u00a0R., & He,\u00a0X. (2019). Pose-aware multi-level feature network for human object interaction detection. In Proceedings Of The IEEE international conference on computer vision (pp. 9469\u20139478).","DOI":"10.1109\/ICCV.2019.00956"},{"key":"10.1016\/j.eswa.2021.116424_b201","doi-asserted-by":"crossref","first-page":"3","DOI":"10.1016\/j.patrec.2018.02.010","article-title":"Deep learning for sensor-based activity recognition: A survey","volume":"119","author":"Wang","year":"2019","journal-title":"Pattern Recognition Letters"},{"key":"10.1016\/j.eswa.2021.116424_b202","doi-asserted-by":"crossref","first-page":"15","DOI":"10.1109\/TIP.2019.2925285","article-title":"A comparative review of recent kinect-based action recognition algorithms","volume":"29","author":"Wang","year":"2019","journal-title":"IEEE Transactions on Image Processing"},{"key":"10.1016\/j.eswa.2021.116424_b203","doi-asserted-by":"crossref","unstructured":"Wang,\u00a0P., Li,\u00a0Z., Hou,\u00a0Y., & Li,\u00a0W. (2016). Action recognition based on joint trajectory maps using convolutional neural networks. 
In Proceedings of the 24th ACM international conference on multimedia.","DOI":"10.1145\/2964284.2967191"},{"key":"10.1016\/j.eswa.2021.116424_b204","series-title":"Cooperative training of deep aggregation networks for RGB-D action recognition","author":"Wang","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b205","series-title":"2012 IEEE Conference on computer vision and pattern recognition","first-page":"1290","article-title":"Mining actionlet ensemble for action recognition with depth cameras","author":"Wang","year":"2012"},{"key":"10.1016\/j.eswa.2021.116424_b206","doi-asserted-by":"crossref","unstructured":"Wang,\u00a0J., Nie,\u00a0X., Xia,\u00a0Y., Wu,\u00a0Y., & Zhu,\u00a0S.-C. (2014). Cross-view action modeling, learning and recognition. In Proceedings Of The IEEE conference on computer vision and pattern recognition (pp. 2649\u20132656).","DOI":"10.1109\/CVPR.2014.339"},{"key":"10.1016\/j.eswa.2021.116424_b207","doi-asserted-by":"crossref","unstructured":"Wang,\u00a0H., & Wang,\u00a0L. (2017). Modeling temporal dynamics and spatial configurations of actions using two-stream recurrent neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 499\u2013508).","DOI":"10.1109\/CVPR.2017.387"},{"key":"10.1016\/j.eswa.2021.116424_b208","doi-asserted-by":"crossref","unstructured":"Wang,\u00a0P., Wang,\u00a0S., Gao,\u00a0Z., Hou,\u00a0Y., & Li,\u00a0W. (2017). Structured images for RGB-D action recognition. In Proceedings of the IEEE international conference on computer vision workshops (pp. 1005\u20131014).","DOI":"10.1109\/ICCVW.2017.123"},{"key":"10.1016\/j.eswa.2021.116424_b209","series-title":"European conference on computer vision","first-page":"370","article-title":"Graph based skeleton motion representation and similarity measurement for action recognition","author":"Wang","year":"2016"},{"issue":"10","key":"10.1016\/j.eswa.2021.116424_b210","doi-asserted-by":"crossref","first-page":"2905","DOI":"10.3390\/s20102905","article-title":"C-MHAD: Continuous multimodal human action dataset of simultaneous video and inertial sensing","volume":"20","author":"Wei","year":"2020","journal-title":"Sensors"},{"key":"10.1016\/j.eswa.2021.116424_b211","doi-asserted-by":"crossref","unstructured":"Wei,\u00a0P., Zhao,\u00a0Y., Zheng,\u00a0N., & Zhu,\u00a0S.-C. (2013). Modeling 4d human-object interactions for event and object recognition. In Proceedings Of The IEEE international conference on computer vision (pp. 3272\u20133279).","DOI":"10.1109\/ICCV.2013.406"},{"key":"10.1016\/j.eswa.2021.116424_b212","series-title":"2017 International joint conference on neural networks","first-page":"2865","article-title":"Recent advances in video-based human action recognition using deep learning: a review","author":"Wu","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b213","doi-asserted-by":"crossref","unstructured":"Wu,\u00a0C., Wu,\u00a0X.-J., & Kittler,\u00a0J. (2019). Spatial residual layer and dense connection block enhanced spatial temporal graph convolutional network for skeleton-based action recognition. 
In Proceedings of the IEEE international conference on computer vision workshops.","DOI":"10.1109\/ICCVW.2019.00216"},{"key":"10.1016\/j.eswa.2021.116424_b214","series-title":"2012 IEEE computer society conference on computer vision and pattern recognition workshops","first-page":"20","article-title":"View invariant human action recognition using histograms of 3d joints","author":"Xia","year":"2012"},{"issue":"10","key":"10.1016\/j.eswa.2021.116424_b215","doi-asserted-by":"crossref","first-page":"1135","DOI":"10.3390\/e22101135","article-title":"Global co-occurrence feature and local spatial feature learning for skeleton-based action recognition","volume":"22","author":"Xie","year":"2020","journal-title":"Entropy"},{"key":"10.1016\/j.eswa.2021.116424_b216","series-title":"2020 IEEE international conference on e-health networking, application & services","first-page":"1","article-title":"Multi-level co-occurrence graph convolutional LSTM for skeleton-based action recognition","author":"Xu","year":"2021"},{"key":"10.1016\/j.eswa.2021.116424_b217","doi-asserted-by":"crossref","first-page":"324","DOI":"10.1016\/j.pmcj.2017.07.001","article-title":"Learning multi-level features for sensor-based human action recognition","volume":"40","author":"Xu","year":"2017","journal-title":"Pervasive and Mobile Computing"},{"key":"10.1016\/j.eswa.2021.116424_b218","doi-asserted-by":"crossref","DOI":"10.1109\/TCSVT.2018.2864148","article-title":"Action recognition with spatio\u2013temporal visual attention on skeleton image sequences","author":"Yang","year":"2019","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.eswa.2021.116424_b219","series-title":"Ijcai, Vol. 15","first-page":"3995","article-title":"Deep convolutional neural networks on multichannel time series for human activity recognition","author":"Yang","year":"2015"},{"key":"10.1016\/j.eswa.2021.116424_b220","series-title":"2020 IEEE region 10 conference","first-page":"1260","article-title":"Human motion recognition by three-view kinect sensors in virtual basketball training","author":"Yao","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b221","doi-asserted-by":"crossref","first-page":"14","DOI":"10.1016\/j.patrec.2018.05.018","article-title":"A review of convolutional-neural-network-based action recognition","volume":"118","author":"Yao","year":"2019","journal-title":"Pattern Recognition Letters"},{"key":"10.1016\/j.eswa.2021.116424_b222","series-title":"Dynamic GCN: Context-enriched topology learning for skeleton-based action recognition","author":"Ye","year":"2020"},{"key":"10.1016\/j.eswa.2021.116424_b223","series-title":"2019 IEEE biomedical circuits and systems conference","first-page":"1","article-title":"A skeleton-based action recognition system for medical condition detection","author":"Yin","year":"2019"},{"key":"10.1016\/j.eswa.2021.116424_b224","series-title":"2017 IEEE frontiers in education conference","first-page":"1","article-title":"Behavior detection and analysis for learning process in classroom environment","author":"Yu","year":"2017"},{"key":"10.1016\/j.eswa.2021.116424_b225","series-title":"Artificial intelligence for communications and networks","first-page":"450","article-title":"Sensor-based human activity recognition for smart healthcare: A semi-supervised machine learning","author":"Zahin","year":"2019"},{"key":"10.1016\/j.eswa.2021.116424_b226","series-title":"2016 IEEE sensors","first-page":"1","article-title":"Human activity recognition with inertial sensors using 
a deep learning approach","author":"Zebin","year":"2016"},{"key":"10.1016\/j.eswa.2021.116424_b227","series-title":"2021 International conference on control science and electric power systems","first-page":"107","article-title":"Research on 3D modeling and detection methods of wrong actions in sports","author":"Zhai","year":"2021"},{"issue":"2","key":"10.1016\/j.eswa.2021.116424_b228","doi-asserted-by":"crossref","first-page":"4","DOI":"10.1109\/MMUL.2012.24","article-title":"Microsoft kinect sensor and its effect","volume":"19","author":"Zhang","year":"2012","journal-title":"IEEE Multimedia"},{"issue":"8","key":"10.1016\/j.eswa.2021.116424_b229","doi-asserted-by":"crossref","first-page":"1963","DOI":"10.1109\/TPAMI.2019.2896631","article-title":"View adaptive neural networks for high performance skeleton-based human action recognition","volume":"41","author":"Zhang","year":"2019","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.eswa.2021.116424_b230","doi-asserted-by":"crossref","unstructured":"Zhang,\u00a0P., Lan,\u00a0C., Zeng,\u00a0W., Xing,\u00a0J., Xue,\u00a0J., & Zheng,\u00a0N. (2020). Semantics-Guided Neural Networks for Efficient Skeleton-Based Human Action Recognition. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 1112\u20131121).","DOI":"10.1109\/CVPR42600.2020.00119"},{"key":"10.1016\/j.eswa.2021.116424_b231","doi-asserted-by":"crossref","DOI":"10.1155\/2017\/3090343","article-title":"A review on human activity recognition using vision-based method","volume":"2017","author":"Zhang","year":"2017","journal-title":"Journal of Healthcare Engineering"},{"key":"10.1016\/j.eswa.2021.116424_b232","doi-asserted-by":"crossref","first-page":"1061","DOI":"10.1109\/TIP.2019.2937724","article-title":"Eleatt-rnn: Adding attentiveness to neurons in recurrent neural networks","volume":"29","author":"Zhang","year":"2019","journal-title":"IEEE Transactions on Image Processing"},{"issue":"5","key":"10.1016\/j.eswa.2021.116424_b233","doi-asserted-by":"crossref","first-page":"1005","DOI":"10.3390\/s19051005","article-title":"A comprehensive survey of vision-based human action recognition methods","volume":"19","author":"Zhang","year":"2019","journal-title":"Sensors"},{"key":"10.1016\/j.eswa.2021.116424_b234","doi-asserted-by":"crossref","unstructured":"Zhao,\u00a0R., Xu,\u00a0W., Su,\u00a0H., & Ji,\u00a0Q. (2019). Bayesian hierarchical dynamic model for human action recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 
7733\u20137742).","DOI":"10.1109\/CVPR.2019.00792"},{"issue":"7","key":"10.1016\/j.eswa.2021.116424_b235","doi-asserted-by":"crossref","first-page":"2146","DOI":"10.3390\/s18072146","article-title":"Comparison of data preprocessing approaches for applying deep learning to human activity recognition in the context of industry 4.0","volume":"18","author":"Zheng","year":"2018","journal-title":"Sensors"},{"key":"10.1016\/j.eswa.2021.116424_b236","series-title":"Action machine: Rethinking action recognition in trimmed videos","author":"Zhu","year":"2018"},{"issue":"17","key":"10.1016\/j.eswa.2021.116424_b237","doi-asserted-by":"crossref","first-page":"5895","DOI":"10.3390\/s21175895","article-title":"Real-time action recognition system for elderly people using stereo depth camera","volume":"21","author":"Zin","year":"2021","journal-title":"Sensors"},{"key":"10.1016\/j.eswa.2021.116424_b238","series-title":"2016 Federated conference on computer science and information systems","first-page":"1435","article-title":"SARF: Smart activity recognition framework in ambient assisted living","author":"Zolfaghari","year":"2016"}],"container-title":["Expert Systems with Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0957417421017115?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0957417421017115?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2024,2,20]],"date-time":"2024-02-20T20:00:21Z","timestamp":1708459221000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0957417421017115"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5]]},"references-count":238,"alternative-id":["S0957417421017115"],"URL":"https:\/\/doi.org\/10.1016\/j.eswa.2021.116424","relation":{},"ISSN":["0957-4174"],"issn-type":[{"value":"0957-4174","type":"print"}],"subject":[],"published":{"date-parts":[[2022,5]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"3D Human Action Recognition: Through the eyes of researchers","name":"articletitle","label":"Article Title"},{"value":"Expert Systems with Applications","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.eswa.2021.116424","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2022 Elsevier Ltd. All rights reserved.","name":"copyright","label":"Copyright"}],"article-number":"116424"}}