{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T05:40:40Z","timestamp":1730353240118,"version":"3.28.0"},"reference-count":59,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2023,10,19]],"date-time":"2023-10-19T00:00:00Z","timestamp":1697673600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,10,19]],"date-time":"2023-10-19T00:00:00Z","timestamp":1697673600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Multimodal User Interfaces"],"published-print":{"date-parts":[[2024,3]]},"DOI":"10.1007\/s12193-023-00421-w","type":"journal-article","created":{"date-parts":[[2023,10,19]],"date-time":"2023-10-19T13:02:54Z","timestamp":1697720574000},"page":"69-85","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Comparing alternative modalities in the context of multimodal human\u2013robot interaction"],"prefix":"10.1007","volume":"18","author":[{"ORCID":"http:\/\/orcid.org\/0000-0002-3037-3495","authenticated-orcid":false,"given":"Suprakas","family":"Saren","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-4341-0523","authenticated-orcid":false,"given":"Abhishek","family":"Mukhopadhyay","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0001-5022-4123","authenticated-orcid":false,"given":"Debasish","family":"Ghose","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0003-3054-6699","authenticated-orcid":false,"given":"Pradipta","family":"Biswas","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,19]]},"reference":[{"key":"421_CR1","doi-asserted-by":"publisher","first-page":"59","DOI":"10.1016\/j.procs.2014.11.010","volume":"39","author":"P Biswas","year":"2014","unstructured":"Biswas P, Langdon P (2014) Eye-gaze tracking based interaction in India. Procedia Comput Sci 39:59\u201366","journal-title":"Procedia Comput Sci"},{"issue":"1","key":"421_CR2","doi-asserted-by":"publisher","first-page":"179","DOI":"10.1109\/TCDS.2019.2959071","volume":"13","author":"J Guo","year":"2019","unstructured":"Guo J et al (2019) A novel robotic guidance system with eye-gaze tracking control for needle-based interventions. IEEE Trans Cognit Dev Syst 13(1):179\u2013188","journal-title":"IEEE Trans Cognit Dev Syst"},{"key":"421_CR3","doi-asserted-by":"crossref","unstructured":"Palinko O et al (2015) Eye gaze tracking for a humanoid robot. In: 2015 IEEE-RAS 15th International Conference on Humanoid Robots (Humanoids), IEEE","DOI":"10.1109\/HUMANOIDS.2015.7363561"},{"key":"421_CR4","doi-asserted-by":"crossref","unstructured":"Sharma S et al (2016) Gesture-based interaction for individuals with developmental disabilities in India. 
In: Proceedings of the 18th International ACM SIGACCESS Conference on Computers and Accessibility","DOI":"10.1145\/2982142.2982166"},{"key":"421_CR5","doi-asserted-by":"publisher","first-page":"287","DOI":"10.1016\/j.aap.2014.07.014","volume":"72","author":"J He","year":"2014","unstructured":"He J et al (2014) Texting while driving: Is speech-based text entry less risky than handheld text entry? Accid Anal Prev 72:287\u2013295","journal-title":"Accid Anal Prev"},{"key":"421_CR6","doi-asserted-by":"crossref","unstructured":"Klamka K et al (2015) Look & pedal: Hands-free navigation in zoomable information spaces through gaze-supported foot input. In: Proceedings of the 2015 ACM on international conference on multi-modal interaction","DOI":"10.1145\/2818346.2820751"},{"key":"421_CR7","doi-asserted-by":"crossref","unstructured":"Manawadu UE et al (2017) A multimodal human-machine interface enabling situation-Adaptive control inputs for highly automated vehicles. In: 2017 IEEE Intelligent Vehicles Symposium (IV), IEEE","DOI":"10.1109\/IVS.2017.7995875"},{"key":"421_CR8","doi-asserted-by":"crossref","unstructured":"Bolt R (1980) Put-that-there\" Voice and gesture at the graphics interface,\". In: Proceedings of the 7th annual conference on Computer graphics and interactive techniques","DOI":"10.1145\/800250.807503"},{"key":"421_CR9","doi-asserted-by":"crossref","unstructured":"Hornof AJ and Cavender A (2005) EyeDraw: enabling children with severe motor impairments to draw with their eyes. In: Proceedings of the SIGCHI conference on Human factors in computing systems","DOI":"10.1145\/1054972.1054995"},{"key":"421_CR10","doi-asserted-by":"crossref","unstructured":"Nancel M et al (2011) Mid-air pan-and-zoom on wall-sized displays. In: Proceedings of the SIGCHI Conference on Human Factors in Computing Systems","DOI":"10.1145\/1978942.1978969"},{"key":"421_CR11","doi-asserted-by":"crossref","unstructured":"Serpiva V et al (2021) Dronepaint: swarm light painting with DNN-based gesture recognition. ACM SIGGRAPH 2021 Emerging Technologies, pp 1-4","DOI":"10.1145\/3450550.3465349"},{"issue":"4","key":"421_CR12","doi-asserted-by":"publisher","first-page":"357","DOI":"10.1007\/s12193-022-00396-0","volume":"16","author":"B Yam-Viramontes","year":"2022","unstructured":"Yam-Viramontes B et al (2022) Commanding a drone through body poses, improving the user experience. J Multimod User Interfaces 16(4):357\u2013369","journal-title":"J Multimod User Interfaces"},{"key":"421_CR13","doi-asserted-by":"crossref","unstructured":"Majaranta P and R\u00e4ih\u00e4 K-J (2002) Twenty years of eye typing: systems and design issues. In: Proceedings of the 2002 symposium on Eye tracking research & applications","DOI":"10.1145\/507075.507076"},{"key":"421_CR14","doi-asserted-by":"crossref","unstructured":"Kumar M et al (2007) Eyepoint: practical pointing and selection using gaze and keyboard. In: Proceedings of the SIGCHI conference on Human factors in computing systems","DOI":"10.1145\/1240624.1240692"},{"key":"421_CR15","unstructured":"Sharma VK et al (2020) Eye gaze controlled robotic arm for persons with severe speech and motor impairment. In: ACM Symposium on Eye Tracking Research and Applications"},{"issue":"11","key":"421_CR16","doi-asserted-by":"publisher","first-page":"74","DOI":"10.1145\/319382.319398","volume":"42","author":"S Oviatt","year":"1999","unstructured":"Oviatt S (1999) Ten myths of multimodal interaction. 
Commun ACM 42(11):74\u201381","journal-title":"Commun ACM"},{"issue":"4","key":"421_CR17","doi-asserted-by":"publisher","first-page":"293","DOI":"10.1007\/s10055-013-0230-0","volume":"17","author":"M Lee","year":"2013","unstructured":"Lee M et al (2013) A usability study of multimodal input in an augmented reality environment. Virt Real 17(4):293\u2013305","journal-title":"Virt Real"},{"issue":"1","key":"421_CR18","doi-asserted-by":"publisher","first-page":"233","DOI":"10.1007\/s11042-011-0983-y","volume":"62","author":"W H\u00fcrst","year":"2013","unstructured":"H\u00fcrst W, Van Wezel C (2013) Gesture-based interaction via finger tracking for mobile augmented reality. Multimed Tools Appl 62(1):233\u2013258","journal-title":"Multimed Tools Appl"},{"key":"421_CR19","doi-asserted-by":"crossref","unstructured":"M. Van den Bergh, et al., \"Real-time 3D hand gesture interaction with a robot for understanding directions from humans,\" 2011 Ro-Man. IEEE, 2011.","DOI":"10.1109\/ROMAN.2011.6005195"},{"issue":"2","key":"421_CR20","doi-asserted-by":"publisher","first-page":"499","DOI":"10.1016\/j.jvcir.2013.03.017","volume":"25","author":"V Alvarez-Santos","year":"2014","unstructured":"Alvarez-Santos V et al (2014) Gesture-based interaction with voice feedback for a tour-guide robot. J Vis Commun Image Represent 25(2):499\u2013509","journal-title":"J Vis Commun Image Represent"},{"key":"421_CR21","doi-asserted-by":"crossref","unstructured":"Haddadi A et al (2013) Analysis of task-based gestures in human-robot interaction. In: 2013 IEEE International Conference on Robotics and Automation, IEEE","DOI":"10.1109\/ICRA.2013.6630865"},{"key":"421_CR22","doi-asserted-by":"publisher","first-page":"116485","DOI":"10.1109\/ACCESS.2022.3218679","volume":"10","author":"J Al Mahmud","year":"2022","unstructured":"Al Mahmud J et al (2022) 3D gesture recognition and adaptation for human-robot interaction. IEEE Access 10:116485\u2013116513","journal-title":"IEEE Access"},{"key":"421_CR23","doi-asserted-by":"crossref","unstructured":"Coronado E et al (2017) Gesture-based robot control: Design challenges and evaluation with humans. In: 2017 IEEE international conference on robotics and automation (ICRA), IEEE","DOI":"10.1109\/ICRA.2017.7989321"},{"issue":"9","key":"421_CR24","doi-asserted-by":"publisher","first-page":"1643","DOI":"10.1007\/s11548-017-1523-7","volume":"12","author":"J Hettig","year":"2017","unstructured":"Hettig J et al (2017) Comparison of gesture and conventional interaction techniques for interventional neuroradiology. Int J Comput Assist Radiol Surg 12(9):1643\u20131653","journal-title":"Int J Comput Assist Radiol Surg"},{"key":"421_CR25","unstructured":"Gips J and Olivieri P (1996) EagleEyes: an eye control system for persons with disabilities. In: The eleventh international conference on technology and persons with disabilities"},{"key":"421_CR26","doi-asserted-by":"crossref","unstructured":"Wobbrock JO et al (2008) Longitudinal evaluation of discrete consecutive gaze gestures for text entry. In: Proceedings of the 2008 symposium on Eye tracking research & applications","DOI":"10.1145\/1344471.1344475"},{"issue":"4","key":"421_CR27","doi-asserted-by":"publisher","first-page":"277","DOI":"10.1080\/10447318.2014.1001301","volume":"31","author":"P Biswas","year":"2015","unstructured":"Biswas P, Langdon P (2015) Multimodal intelligent eye-gaze tracking system. 
Int Jf Human-Comput Interact 31(4):277\u2013294","journal-title":"Int Jf Human-Comput Interact"},{"issue":"2","key":"421_CR28","doi-asserted-by":"publisher","first-page":"34","DOI":"10.7771\/2159-6670.1235","volume":"10","author":"LRD Murthy","year":"2021","unstructured":"Murthy LRD et al (2021) Eye-gaze-controlled HMDS and MFD for military aircraft. J Aviat Technol Eng 10(2):34","journal-title":"J Aviat Technol Eng"},{"key":"421_CR29","doi-asserted-by":"crossref","unstructured":"Lim Y et al (2018) Eye-tracking sensors for adaptive aerospace human-machine interfaces and interactions. In: 2018 5th IEEE international workshop on metrology for aerospace (MetroAeroSpace), IEEE","DOI":"10.1109\/MetroAeroSpace.2018.8453509"},{"issue":"5","key":"421_CR30","doi-asserted-by":"publisher","first-page":"924","DOI":"10.1016\/j.aap.2009.05.007","volume":"41","author":"M Jannette","year":"2009","unstructured":"Jannette M, Vollrath M (2009) Comparison of manual vs. speech-based interaction with in-vehicle information systems. Accid Anal Prev 41(5):924\u2013930","journal-title":"Accid Anal Prev"},{"issue":"4","key":"421_CR31","doi-asserted-by":"publisher","first-page":"631","DOI":"10.1518\/001872001775870340","volume":"43","author":"JD Lee","year":"2001","unstructured":"Lee JD et al (2001) Speech-based interaction with in-vehicle computers: the effect of speech-based email on drivers\u2019 attention to the roadway. Hum Factors 43(4):631\u2013640","journal-title":"Hum Factors"},{"key":"421_CR32","doi-asserted-by":"crossref","unstructured":"Doyle J and Bertolotto M (2006) Combining speech and pen input for effective interaction in mobile geospatial environments. In: Proceedings of the 2006 ACM symposium on Applied computing","DOI":"10.1145\/1141277.1141557"},{"key":"421_CR33","doi-asserted-by":"crossref","unstructured":"Fr\u00f6hlich J and Wachsmuth I (2013) The visual, the auditory and the haptic\u2013a user study on combining modalities in virtual worlds. In: International Conference on Virtual, Augmented and Mixed Reality, Springer, Berlin, Heidelberg","DOI":"10.1007\/978-3-642-39405-8_19"},{"key":"421_CR34","doi-asserted-by":"crossref","unstructured":"Frisch M et al (2009) Investigating multi-touch and pen gestures for diagram editing on interactive surfaces. In: Proceedings of the ACM international conference on interactive tabletops and surfaces","DOI":"10.1145\/1731903.1731933"},{"key":"421_CR35","doi-asserted-by":"crossref","unstructured":"Pfeuffer K et al (2014) Gaze-touch: combining gaze with multi-touch for interaction on the same surface. In: Proceedings of the 27th annual ACM symposium on User interface software and technology","DOI":"10.1145\/2642918.2647397"},{"key":"421_CR36","doi-asserted-by":"crossref","unstructured":"Hatscher B and Hansen C (2018) Hand, foot or voice: alternative input modalities for touchless interaction in the medical domain. In: Proceedings of the 20th ACM international conference on multi-modal interaction","DOI":"10.1145\/3242969.3242971"},{"key":"421_CR37","doi-asserted-by":"crossref","unstructured":"Chen Z et al (2017) Multi-modal interaction in augmented reality. In: 2017 IEEE international conference on systems, man, and cybernetics (SMC), IEEE","DOI":"10.1109\/SMC.2017.8122603"},{"issue":"1","key":"421_CR38","doi-asserted-by":"publisher","first-page":"101","DOI":"10.1007\/s12193-019-00316-9","volume":"14","author":"G Prabhakar","year":"2020","unstructured":"Prabhakar G et al (2020) Interactive gaze and finger controlled HUD for cars. 
J Multi-Modal User Interfaces 14(1):101\u2013121","journal-title":"J Multi-Modal User Interfaces"},{"key":"421_CR39","doi-asserted-by":"crossref","unstructured":"Palinko O et al (2016) Robot reading human gaze: Why eye tracking is better than head tracking for human-robot collaboration. In: 2016 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), IEEE","DOI":"10.1109\/IROS.2016.7759741"},{"key":"421_CR40","doi-asserted-by":"crossref","unstructured":"Craig TL et al (2016) Human gaze commands classification: a shape based approach to interfacing with robots. In: 2016 12th IEEE\/ASME International Conference on Mechatronic and Embedded Systems and Applications (MESA), IEEE","DOI":"10.1109\/MESA.2016.7587154"},{"issue":"2","key":"421_CR41","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2744206","volume":"7","author":"F Rudzicz","year":"2015","unstructured":"Rudzicz F et al (2015) Speech interaction with personal assistive robots supporting aging at home for individuals with Alzheimer\u2019s disease. ACM Trans Access Comput (TACCESS) 7(2):1\u201322","journal-title":"ACM Trans Access Comput (TACCESS)"},{"key":"421_CR42","doi-asserted-by":"crossref","unstructured":"Prodanov PJ et al (2002) Voice enabled interface for interactive tour-guide robots. In: IEEE\/RSJ International Conference on Intelligent Robots and Systems. Vol 2, IEEE","DOI":"10.1109\/IRDS.2002.1043939"},{"issue":"2","key":"421_CR43","doi-asserted-by":"publisher","first-page":"607","DOI":"10.1109\/TII.2016.2625818","volume":"13","author":"K Zinchenko","year":"2016","unstructured":"Zinchenko K et al (2016) A study on speech recognition control for a surgical robot. IEEE Trans Industr Inf 13(2):607\u2013615","journal-title":"IEEE Trans Industr Inf"},{"key":"421_CR44","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2020.103903","volume":"95","author":"MC Bingol","year":"2020","unstructured":"Bingol MC, Aydogmus O (2020) Performing predefined tasks using the human\u2013robot interaction on speech recognition for an industrial robot. Eng Appl Artif Intell 95:103903","journal-title":"Eng Appl Artif Intell"},{"key":"421_CR45","unstructured":"Kurnia R et al (2004) Object recognition through human-robot interaction by speech. RO-MAN 2004. In: 13th IEEE International Workshop on Robot and Human Interactive Communication (IEEE Catalog No. 04TH8759), IEEE"},{"key":"421_CR46","doi-asserted-by":"crossref","unstructured":"Bannat A et al (2009) A multimodal human-robot-interaction scenario: working together with an industrial robot. In: International conference on human-computer interaction, Springer, Berlin, Heidelberg","DOI":"10.1007\/978-3-642-02577-8_33"},{"issue":"1","key":"421_CR47","doi-asserted-by":"publisher","first-page":"19","DOI":"10.1007\/s11370-012-0123-1","volume":"6","author":"G Randelli","year":"2013","unstructured":"Randelli G et al (2013) Knowledge acquisition through human\u2013robot multimodal interaction. Intel Serv Robot 6(1):19\u201331","journal-title":"Intel Serv Robot"},{"issue":"3","key":"421_CR48","doi-asserted-by":"publisher","first-page":"923","DOI":"10.3390\/s22030923","volume":"22","author":"D Strazdas","year":"2022","unstructured":"Strazdas D et al (2022) Robot system assistant (RoSA): towards intuitive multi-modal and multi-device human-robot interaction. Sensors 22(3):923","journal-title":"Sensors"},{"key":"421_CR49","unstructured":"Tobii PCEye Mini. https:\/\/www.tobii.com\/products\/eye-trackers. 
Accessed on 31st October 2022"},{"key":"421_CR50","unstructured":"Leap Motion Controller. https:\/\/leap2.ultraleap.com\/leap-motion-controller-2\/. Accessed on 31st Oct 2022"},{"key":"421_CR51","unstructured":"Turtlebot3 Burger. https:\/\/emanual.robotis.com\/docs\/en\/platform\/turtlebot3\/overview\/. Accessed on 31st Oct 2022"},{"key":"421_CR52","unstructured":"Dobot Magician Lite. https:\/\/www.dobot-robots.com\/products\/education\/magician-lite.html. Accessed on 31st Oct 2022"},{"key":"421_CR53","unstructured":"Logitech C310 HD Webcam.https:\/\/www.logitech.com\/en-in\/products\/webcams\/c310-hd-webcam.960-000588.html. Accessed on 31st Oct 2022"},{"key":"421_CR54","unstructured":"NiTHO Drive Pro One. https:\/\/nitho.com\/products\/drive-pro%E2%84%A2-one-racing-wheel. Accessed on 31st Oct 2022"},{"key":"421_CR55","unstructured":"NATO Phonetic Alphabet. https:\/\/www.worldometers.info\/languages\/nato-phonetic-alphabet\/. Accessed 31 Oct 2022"},{"key":"421_CR56","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1016\/S0166-4115(08)62386-9","volume":"52","author":"SG Hart","year":"1988","unstructured":"Hart SG, Staveland LE (1988) Development of NASA-TLX (Task Load Index): results of empirical and theoretical research. Adv Psychol 52:139\u2013183","journal-title":"Adv Psychol"},{"key":"421_CR57","doi-asserted-by":"crossref","unstructured":"Biswas P and Dv J (2018) Eye gaze controlled MFD for military aviation. In: 23rd International Conference on Intelligent User Interfaces","DOI":"10.1145\/3172944.3172973"},{"key":"421_CR58","unstructured":"Karpov A and Ronzhin A (2014) A universal assistive technology with multimodal input and multimedia output interfaces. Universal Access in Human-Computer Interaction. Design and Development Methods for Universal Access: 8th International Conference, UAHCI 2014, Held as Part of HCI International 2014, Heraklion, Crete, Greece, Jun 22\u201327, 2014, Proceedings, Part I 8, Springer International Publishing"},{"key":"421_CR59","doi-asserted-by":"crossref","unstructured":"Mukhopadhyay A et al (2019) Comparing CNNs for non-conventional traffic participants. 
In: Proceedings of the 11th International Conference on Automotive User Interfaces and Interactive Vehicular Applications: Adjunct Proceedings","DOI":"10.1145\/3349263.3351336"}],"container-title":["Journal on Multimodal User Interfaces"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12193-023-00421-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s12193-023-00421-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12193-023-00421-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T05:00:48Z","timestamp":1730350848000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s12193-023-00421-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,19]]},"references-count":59,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2024,3]]}},"alternative-id":["421"],"URL":"https:\/\/doi.org\/10.1007\/s12193-023-00421-w","relation":{},"ISSN":["1783-7677","1783-8738"],"issn-type":[{"type":"print","value":"1783-7677"},{"type":"electronic","value":"1783-8738"}],"subject":[],"published":{"date-parts":[[2023,10,19]]},"assertion":[{"value":"10 February 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 September 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 October 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}
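The record above matches the shape that the public Crossref REST API returns for a single work (the "message" object is the work itself; the \/ and \uXXXX sequences are ordinary JSON escapes that decode automatically when parsed). Below is a minimal sketch of retrieving and post-processing it, assuming only Python's standard library, network access to api.crossref.org, and a placeholder mailto address in the User-Agent (Crossref's "polite pool" convention); every field name used is taken from the record above.

```python
# Minimal sketch: fetch the Crossref work record shown above and print key fields.
import json
import urllib.request

DOI = "10.1007/s12193-023-00421-w"  # DOI from the record above
url = f"https://api.crossref.org/works/{DOI}"

# Crossref asks clients to identify themselves; the mailto here is a placeholder.
req = urllib.request.Request(
    url, headers={"User-Agent": "example-client/0.1 (mailto:you@example.org)"}
)
with urllib.request.urlopen(req) as resp:
    payload = json.load(resp)  # {"status": "ok", ..., "message": {...}}

work = payload["message"]  # the work record itself
authors = ", ".join(f"{a['given']} {a['family']}" for a in work["author"])
print(work["title"][0])    # Crossref stores titles as a list
print(authors)
print(f"{work['container-title'][0]} "
      f"{work['volume']}({work['journal-issue']['issue']}):{work['page']}")
print(f"Cited references: {work['references-count']}")
```

The same parsing applies to a record loaded from a file with json.load, so the snippet doubles as a way to validate that the JSON above is well-formed.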