{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,12]],"date-time":"2024-09-12T01:46:17Z","timestamp":1726105577554},"publisher-location":"Cham","reference-count":24,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030638191"},{"type":"electronic","value":"9783030638207"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-63820-7_45","type":"book-chapter","created":{"date-parts":[[2020,11,18]],"date-time":"2020-11-18T09:12:07Z","timestamp":1605690727000},"page":"394-401","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["EMOTIONCAPS - Facial Emotion Recognition Using Capsules"],"prefix":"10.1007","author":[{"given":"Bhavya","family":"Shah","sequence":"first","affiliation":[]},{"given":"Krutarth","family":"Bhatt","sequence":"additional","affiliation":[]},{"given":"Srimanta","family":"Mandal","sequence":"additional","affiliation":[]},{"given":"Suman K.","family":"Mitra","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,17]]},"reference":[{"key":"45_CR1","unstructured":"Abien Fred, M.A.: Deep learning using rectified linear units (ReLU). Neural Evol. Comput. 1, 7 p. (2018)"},{"key":"45_CR2","unstructured":"Arriaga, O., Valdenegro, M., Pl\u00f6ger, P.: Real-time convolutional neural networks for emotion and gender classification. In: ESANN, pp. 221\u2013226 (2019)"},{"key":"45_CR3","unstructured":"Carrier, P.L., Courville, A., Goodfellow, I.J., Mirza, M., Bengio, Y.: Fer-2013 face database. Technical report (2013)"},{"key":"45_CR4","doi-asserted-by":"crossref","unstructured":"Dalal, N., Triggs, B.: Histograms of oriented gradients for human detection. In: CVPR, vol. 1, pp. 886\u2013893 (2005)","DOI":"10.1109\/CVPR.2005.177"},{"issue":"1","key":"45_CR5","doi-asserted-by":"publisher","first-page":"259","DOI":"10.1016\/S0031-3203(02)00052-3","volume":"36","author":"B Fasel","year":"2003","unstructured":"Fasel, B., Luettin, J.: Automatic facial expression analysis: a survey. Pattern Recogn. 36(1), 259\u2013275 (2003)","journal-title":"Pattern Recogn."},{"key":"45_CR6","doi-asserted-by":"crossref","unstructured":"Hosseini, S., Cho, N.I.: Gf-CapsNet: Using Gabor jet and capsule networks for facial age, gender, and expression recognition. In: FG, pp. 1\u20138 (2019)","DOI":"10.1109\/FG.2019.8756552"},{"key":"45_CR7","unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift. In: ICML, ICML 2015, vol. 37, pp. 448\u2013456. 
JMLR.org (2015)"},{"key":"45_CR8","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization (2014)"},{"issue":"2","key":"45_CR9","doi-asserted-by":"publisher","first-page":"401","DOI":"10.3390\/s18020401","volume":"18","author":"BC Ko","year":"2018","unstructured":"Ko, B.C.: A brief review of facial emotion recognition based on visual information. Sensors 18(2), 401 (2018)","journal-title":"Sensors"},{"issue":"4","key":"45_CR10","first-page":"467","volume":"11","author":"C Liu","year":"2002","unstructured":"Liu, C., Wechsler, H.: Gabor feature based classification using the enhanced fisher linear discriminant model for face recognition. IEEE TIP 11(4), 467\u2013476 (2002)","journal-title":"IEEE TIP"},{"key":"45_CR11","doi-asserted-by":"crossref","unstructured":"Liu, P., Han, S., Meng, Z., Tong, Y.: Facial expression recognition via a boosted deep belief network. In: CVPR, pp. 1805\u20131812 (2014)","DOI":"10.1109\/CVPR.2014.233"},{"key":"45_CR12","doi-asserted-by":"publisher","first-page":"610","DOI":"10.1016\/j.patcog.2016.07.026","volume":"61","author":"AT Lopes","year":"2017","unstructured":"Lopes, A.T., de Aguiar, E., Souza, A.F.D., Oliveira-Santos, T.: Facial expression recognition with convolutional neural networks: coping with few data and the training sample order. Pattern Recogn. 61, 610\u2013628 (2017)","journal-title":"Pattern Recogn."},{"key":"45_CR13","doi-asserted-by":"crossref","unstructured":"Marrero Fernandez, P.D., Guerrero Pena, F.A., Ing Ren, T., Cunha, A.: FERAtt: facial expression recognition with attention net. In: CVPR Workshops, pp. 1\u201310 (2019)","DOI":"10.1109\/CVPRW.2019.00112"},{"key":"45_CR14","unstructured":"Minaee, S., Abdolrashidi, A.: Deep-emotion: facial expression recognition using attentional convolutional network. CoRR abs\/1902.01019 (2019)"},{"key":"45_CR15","doi-asserted-by":"crossref","unstructured":"Mollahosseini, A., Chan, D., Mahoor, M.H.: Going deeper in facial expression recognition using deep neural networks. In: WACV, pp. 1\u201310 (2016)","DOI":"10.1109\/WACV.2016.7477450"},{"key":"45_CR16","unstructured":"Pramerdorfer, C., Kampel, M.: Facial expression recognition using convolutional neural networks:state of the art. CoRR abs\/1612.02903 (2016)"},{"key":"45_CR17","unstructured":"Sabour, S., Frosst, N., Hinton, G.E.: Dynamic routing between capsules. In: NIPS, pp. 3856\u20133866 (2017)"},{"key":"45_CR18","unstructured":"Tang, Y.: Deep learning using linear support vector machines. In: ICML, pp. 1\u20136 (2013)"},{"key":"45_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"578","DOI":"10.1007\/978-3-642-33885-4_58","volume-title":"Computer Vision \u2013 ECCV 2012. Workshops and Demonstrations","author":"U Tariq","year":"2012","unstructured":"Tariq, U., Yang, J., Huang, T.S.: Multi-view facial expression recognition analysis with generic sparse coding feature. In: Fusiello, A., Murino, V., Cucchiara, R. (eds.) ECCV 2012. LNCS, vol. 7585, pp. 578\u2013588. Springer, Heidelberg (2012). https:\/\/doi.org\/10.1007\/978-3-642-33885-4_58"},{"key":"45_CR20","doi-asserted-by":"crossref","unstructured":"Viola, P., Jones, M., et al.: Rapid object detection using a boosted cascade of simple features. In: CVPR(1), vol. 1, no. 511\u2013518, p. 3 (2001)","DOI":"10.1109\/CVPR.2001.990517"},{"key":"45_CR21","doi-asserted-by":"crossref","unstructured":"Yu, Z., Zhang, C.: Image based static facial expression recognition with multiple deep network learning. In: ICMI, ICMI 2015, pp. 
435\u2013442. ACM, New York (2015)","DOI":"10.1145\/2818346.2830595"},{"key":"45_CR22","doi-asserted-by":"publisher","first-page":"643","DOI":"10.1016\/j.neucom.2017.08.043","volume":"273","author":"N Zeng","year":"2018","unstructured":"Zeng, N., Zhang, H., Song, B., Liu, W., Li, Y., Dobaie, A.M.: Facial expression recognition via learning deep sparse autoencoders. Neurocomputing 273, 643\u2013649 (2018)","journal-title":"Neurocomputing"},{"key":"45_CR23","doi-asserted-by":"crossref","unstructured":"Zhang, F., Zhang, T., Mao, Q., Xu, C.: Joint pose and expression modeling for facial expression recognition. In: CVPR, pp. 3359\u20133368 (2018)","DOI":"10.1109\/CVPR.2018.00354"},{"issue":"12","key":"45_CR24","doi-asserted-by":"publisher","first-page":"2528","DOI":"10.1109\/TMM.2016.2598092","volume":"18","author":"T Zhang","year":"2016","unstructured":"Zhang, T., Zheng, W., Cui, Z., Zong, Y., Yan, J., Yan, K.: A deep neural network-driven feature learning method for multi-view facial expression recognition. IEEE Trans. Multimed. 18(12), 2528\u20132536 (2016)","journal-title":"IEEE Trans. Multimed."}],"container-title":["Communications in Computer and Information Science","Neural Information Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-63820-7_45","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,17]],"date-time":"2024-08-17T15:58:37Z","timestamp":1723910317000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-63820-7_45"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030638191","9783030638207"],"references-count":24,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-63820-7_45","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"17 November 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICONIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Neural Information Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Bangkok","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Thailand","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 November 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 November 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconip2020","order":10,"name":"conference_id","label":"Conference 
ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.apnns.org\/ICONIP2020","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"618","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"187","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"189","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"30% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.18","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.68","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Due to COVID-19 pandemic the conference was held virtually.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}