{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,12]],"date-time":"2024-09-12T00:18:13Z","timestamp":1726100293055},"publisher-location":"Cham","reference-count":22,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030602758"},{"type":"electronic","value":"9783030602765"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-60276-5_19","type":"book-chapter","created":{"date-parts":[[2020,10,4]],"date-time":"2020-10-04T07:02:44Z","timestamp":1601794964000},"page":"184-193","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Automated Destructive Behavior State Detection on the 1D CNN-Based Voice Analysis"],"prefix":"10.1007","author":[{"ORCID":"http:\/\/orcid.org\/0000-0001-8358-298X","authenticated-orcid":false,"given":"Anastasia","family":"Iskhakova","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0001-5021-5411","authenticated-orcid":false,"given":"Daniyar","family":"Wolf","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-1129-8434","authenticated-orcid":false,"given":"Roman","family":"Meshcheryakov","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,9,29]]},"reference":[{"key":"19_CR1","doi-asserted-by":"crossref","unstructured":"Iskhakova, A., Iskhakov, A., Meshcheryakov, R.: Research of the estimated emotional components for the content analysis. In: Journal of Physics: Conference Series, vol. 1203, pp. 012065 (2019)","DOI":"10.1088\/1742-6596\/1203\/1\/012065"},{"key":"19_CR2","unstructured":"Kulagina, I., Iskhakova, A., Galin, R.: Modeling the practice of aggression in the socio-cyber-physical environment. Vestnik tomskogo gosudarstvennogo universiteta-Filosofiya-Sotsiologiya-Politologiya-Tomsk state Univ. J. Phil. Sociol. Polit. Sci. 52, 147\u2013161 (2019). (in Russian)"},{"key":"19_CR3","series-title":"Smart Innovation, Systems and Technologies","doi-asserted-by":"publisher","first-page":"511","DOI":"10.1007\/978-981-13-9267-2_42","volume-title":"Proceedings of 14th International Conference on Electromechanics and Robotics \u201cZavalishin\u2019s Readings\u201d","author":"D Levonevskii","year":"2020","unstructured":"Levonevskii, D., Shumskaya, O., Velichko, A., Uzdiaev, M., Malov, D.: Methods for determination of psychophysiological condition of user within smart environment based on complex analysis of heterogeneous data. In: Ronzhin, A., Shishlakov, V. (eds.) Proceedings of 14th International Conference on Electromechanics and Robotics \u201cZavalishin\u2019s Readings\u201d. SIST, vol. 154, pp. 511\u2013523. Springer, Singapore (2020). https:\/\/doi.org\/10.1007\/978-981-13-9267-2_42"},{"key":"19_CR4","unstructured":"Malov, D., Shumskaya, O.: Audiovisual content feature selection for emotion recognition system. 
In: International Conference Cyber-Physical Systems and Control CPS&C (2019)"},{"key":"19_CR5","doi-asserted-by":"crossref","unstructured":"Zheng, W.-L., Zhu, J.-Y., Yong, P., Lu, B.-L.: EEG-based emotion classification using deep belief networks. In: IEEE International Conference on Multimedia & Expo IEEE, pp.\u00a01\u20136. Chengdu, China (2014)","DOI":"10.1109\/ICME.2014.6890166"},{"key":"19_CR6","doi-asserted-by":"crossref","unstructured":"Han, K., Yu, D., Tashev, I.: speech emotion recognition using deep neural network and extreme learning machine. In: INTERSPEECH, pp. 223\u2013227 (2014)","DOI":"10.21437\/Interspeech.2014-57"},{"key":"19_CR7","doi-asserted-by":"crossref","unstructured":"Haq, S., Jackson, P.J.B.: Multimodal emotion recognition. In: Machine Audition. Principles, Algorithms and Systems, vol. 17, pp. 398\u2013423. IGI Global Press (2010)","DOI":"10.4018\/978-1-61520-919-4.ch017"},{"key":"19_CR8","doi-asserted-by":"crossref","unstructured":"Burkhardt, F., Paeschke, A., Rolfes, M., Sendlmeier, W., Weiss, B.: A database of German emotional speech. In: 9th European Conference on Speech Communication and Technology, Lisbon, Portugal, vol. 5, pp. 1517\u20131520 (2005)","DOI":"10.21437\/Interspeech.2005-446"},{"key":"19_CR9","unstructured":"Serrestou, Y., Mbarki, M., Raoof, K., Mahjoub, M.: Speech emotion recognition: methods and cases study. In: Proceedings of the 10th International Conference on Agents and Artificial Intelligence (ICAART 2018), vol. 2, pp. 175\u2013182 (2018)"},{"key":"19_CR10","doi-asserted-by":"crossref","unstructured":"Hossan, M.A., Memon, S., Gregory, M.A.: A novel approach for MFCC feature extraction. In: 2010 4th International Conference on Signal Processing and Communication Systems, Gold Coast, QLD, Australia, pp. 1\u20135 (2010)","DOI":"10.1109\/ICSPCS.2010.5709752"},{"key":"19_CR11","unstructured":"Niu, Y., Zou, D., Niu, Y., He, Z., Tan, H.: A breakthrough in speech emotion recognition using deep retinal convolution neural networks. https:\/\/arxiv.org\/abs\/1707.09917 . Accessed 21 July 2020"},{"key":"19_CR12","doi-asserted-by":"crossref","unstructured":"Oludare, A., Aman, J.: Comprehensive review of artificial neural network applications to pattern recognition. In: IEEE Access, vol. 7, pp. 158820\u2013158846 (2019)","DOI":"10.1109\/ACCESS.2019.2945545"},{"key":"19_CR13","doi-asserted-by":"crossref","unstructured":"Kim, Y.: convolutional neural networks for sentence classification. In: Proceedings of the 2014 Conference on EMNLP, Doha, Qatar, pp. 1746\u20131751 (2014)","DOI":"10.3115\/v1\/D14-1181"},{"key":"19_CR14","doi-asserted-by":"publisher","unstructured":"Dupuis, K., Kathleen, M.: Toronto emotional speech set (TESS). https:\/\/doi.org\/10.5683\/SP2\/E8H2MF . Accessed 21 July 2020","DOI":"10.5683\/SP2\/E8H2MF"},{"key":"19_CR15","unstructured":"Busso, C., et al.: IEMOCAP: interactive emotional dyadic motion capture database. https:\/\/sail.usc.edu\/iemocap\/iemocap_release.htm . Accessed 21 July 2020"},{"key":"19_CR16","doi-asserted-by":"crossref","unstructured":"Zhang, L., et al.: BioVid Emo DB: a multimodal database for emotion analyses validated by subjective ratings. In: 2016 IEEE Symposium Series on Computational Intelligence (SSCI), Athens, pp. 1\u20136 (2016)","DOI":"10.1109\/SSCI.2016.7849931"},{"key":"19_CR17","doi-asserted-by":"crossref","unstructured":"Fayek, H.M., Lech, M., Cavedon, L.: Towards real-time Speech Emotion Recognition using deep neural networks. 
In: International Conference on Signal Processing and Communication Systems 2015, Cairns, QLD, Australia, pp. 1\u20135 (2015)","DOI":"10.1109\/ICSPCS.2015.7391796"},{"issue":"2","key":"19_CR18","first-page":"229","volume":"6","author":"TS Aleshina","year":"2016","unstructured":"Aleshina, T.S., Redko, AYu.: Bases of speech data corpus preparation for the emotional speech recognition. Mod. High Technol. 6(2), 229\u2013233 (2016). In Russ","journal-title":"Mod. High Technol."},{"key":"19_CR19","unstructured":"Mahesh, C.M., Matthias, H.: Variants of RMSProp and adagrad with logarithmic regret bounds. In: Proceedings of the 34th International Conference on Machine Learning, Sydney, Australia (2017)"},{"key":"19_CR20","doi-asserted-by":"crossref","unstructured":"Bottou, L.: Large-scale machine learning with stochastic gradient descent. In: Lechevallier Y., Saporta G. (eds.) Proceedings of COMPSTAT 2010, pp. 177\u2013186. Physica-Verlag HD (2010)","DOI":"10.1007\/978-3-7908-2604-3_16"},{"issue":"3","key":"19_CR21","first-page":"222","volume":"20","author":"I Ispas","year":"2017","unstructured":"Ispas, I., Dragomir, V., Dascalu, M., Zoltan, I., Stoica, C.: Voice based emotion recognition with convolutional neural networks for companion robots. Rom. J. Inf. Sci. Technol. 20(3), 222\u2013240 (2017)","journal-title":"Rom. J. Inf. Sci. Technol."},{"issue":"5","key":"19_CR22","doi-asserted-by":"publisher","first-page":"1057","DOI":"10.1109\/TASL.2010.2076804","volume":"19","author":"E Mower","year":"2011","unstructured":"Mower, E., Mataric, M.J., Narayanan, S.S.: A framework for automatic human emotion classification using emotional profiles. IEEE Trans. Audio Speech Lang. Process. 19(5), 1057\u20131070 (2011)","journal-title":"IEEE Trans. Audio Speech Lang. Process."}],"container-title":["Lecture Notes in Computer Science","Speech and Computer"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-60276-5_19","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,22]],"date-time":"2022-11-22T02:41:34Z","timestamp":1669084894000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-60276-5_19"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030602758","9783030602765"],"references-count":22,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-60276-5_19","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"29 September 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"SPECOM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Speech and Computer","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"St. 
Petersburg","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Russia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7 October 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 October 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"specom2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/specom.nw.ru\/2020\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"160","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"65","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"41% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Due to the Corona pandemic SPECOM 2020 was held as a virtual 
event","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}