{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,2,17]],"date-time":"2024-02-17T12:10:23Z","timestamp":1708171823617},"reference-count":35,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,1,16]],"date-time":"2024-01-16T00:00:00Z","timestamp":1705363200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,16]],"date-time":"2024-01-16T00:00:00Z","timestamp":1705363200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["61977027"],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,2]]},"DOI":"10.1007\/s00530-023-01219-2","type":"journal-article","created":{"date-parts":[[2024,1,16]],"date-time":"2024-01-16T02:03:14Z","timestamp":1705370594000},"update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Facial expression intensity estimation using label-distribution-learning-enhanced ordinal regression"],"prefix":"10.1007","volume":"30","author":[{"given":"Ruyi","family":"Xu","sequence":"first","affiliation":[]},{"given":"Zhun","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Jingying","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Longpu","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,16]]},"reference":[{"key":"1219_CR1","doi-asserted-by":"publisher","unstructured":"Yang, 
P., Liu, Q., Metaxas, D.N.: Rankboost with l1 regularization for facial expression recognition and intensity estimation. In: 2009 IEEE 12th International Conference on Computer Vision, pp. 1018\u20131025 (2009). https:\/\/doi.org\/10.1109\/ICCV.2009.5459371","DOI":"10.1109\/ICCV.2009.5459371"},{"key":"1219_CR2","doi-asserted-by":"publisher","unstructured":"Rudovic, O., Pavlovic, V., Pantic, M.: Multi-output laplacian dynamic ordinal regression for facial expression recognition and intensity estimation, pp. 2634\u20132641 (2012). https:\/\/doi.org\/10.1109\/CVPR.2012.6247983","DOI":"10.1109\/CVPR.2012.6247983"},{"key":"1219_CR3","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1016\/j.neucom.2018.06.054","volume":"313","author":"M Sabri","year":"2018","unstructured":"Sabri, M., Kurita, T.: Facial expression intensity estimation using siamese and triplet networks. Neurocomputing 313, 143\u2013154 (2018). https:\/\/doi.org\/10.1016\/j.neucom.2018.06.054","journal-title":"Neurocomputing"},{"key":"1219_CR4","doi-asserted-by":"publisher","unstructured":"Saha, C., Ghosh, K.: Estimation of facial expression intensity from a sequence of binary face images, pp. 1\u20136 (2011). https:\/\/doi.org\/10.1109\/ICIIP.2011.6108935","DOI":"10.1109\/ICIIP.2011.6108935"},{"key":"1219_CR5","doi-asserted-by":"publisher","unstructured":"Ming, Z., Bugeau, A., Rouas, J.-L., Shochi, T.: Facial action units intensity estimation by the fusion of features with multi-kernel support vector machine. In: 2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), vol. 06, pp. 1\u20136 (2015). https:\/\/doi.org\/10.1109\/FG.2015.7284870","DOI":"10.1109\/FG.2015.7284870"},{"key":"1219_CR6","unstructured":"Batista, J.C., Bellon, O.R., Silva, L.: Landmark-free smile intensity estimation. In: Workshop Conf. 
Graphics (2016)"},{"key":"1219_CR7","doi-asserted-by":"publisher","unstructured":"Lee, K.K., Xu, Y.: Real-time estimation of facial expression intensity. In: 2003 IEEE International Conference on Robotics and Automation (Cat. No.03CH37422), vol. 2, pp. 2567\u20132572 (2003). https:\/\/doi.org\/10.1109\/ROBOT.2003.1241979","DOI":"10.1109\/ROBOT.2003.1241979"},{"key":"1219_CR8","doi-asserted-by":"publisher","first-page":"25","DOI":"10.1016\/j.patrec.2017.04.003","volume":"92","author":"SKA Kamarol","year":"2017","unstructured":"Kamarol, S.K.A., Jaward, M.H., K\u00e4lvi\u00e4inen, H., Parkkinen, J., Parthiban, R.: Joint facial expression recognition and intensity estimation based on weighted votes of image sequences. Pattern Recogn. Lett. 92, 25\u201332 (2017). https:\/\/doi.org\/10.1016\/j.patrec.2017.04.003","journal-title":"Pattern Recogn. Lett."},{"key":"1219_CR9","doi-asserted-by":"publisher","first-page":"113","DOI":"10.1016\/j.ins.2020.04.012","volume":"528","author":"M Xue","year":"2020","unstructured":"Xue, M., Duan, X., Liu, W., Ren, Y.: A semantic facial expression intensity descriptor based on information granules. Inf. Sci. 528, 113\u2013132 (2020). https:\/\/doi.org\/10.1016\/j.ins.2020.04.012","journal-title":"Inf. Sci."},{"key":"1219_CR10","doi-asserted-by":"publisher","unstructured":"Lien, J.J.-J., Kanade, T., Cohn, J.F., Li, C.-C.: Subtly different facial expression recognition and expression intensity estimation. In: Proceedings. 1998 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No.98CB36231), pp. 853\u2013859 (1998). https:\/\/doi.org\/10.1109\/CVPR.1998.698704","DOI":"10.1109\/CVPR.1998.698704"},{"key":"1219_CR11","doi-asserted-by":"publisher","unstructured":"Liao, C.-T., Chuang, H.-J., Lai, S.-H.: Learning expression kernels for facial expression intensity estimation. In: 2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2217\u20132220 (2012). 
https:\/\/doi.org\/10.1109\/ICASSP.2012.6288354","DOI":"10.1109\/ICASSP.2012.6288354"},{"key":"1219_CR12","doi-asserted-by":"publisher","DOI":"10.1145\/3136625","author":"J Li","year":"2017","unstructured":"Li, J., Cheng, K., Wang, S., Morstatter, F., Trevino, R.P., Tang, J., Liu, H.: Feature selection: a data perspective. ACM Comput. Surv. (2017). https:\/\/doi.org\/10.1145\/3136625","journal-title":"ACM Comput. Surv."},{"key":"1219_CR13","doi-asserted-by":"publisher","first-page":"203","DOI":"10.2991\/ijndc.2016.4.4.1","volume":"4","author":"H Nomiya","year":"2016","unstructured":"Nomiya, H., Sakaue, S., Hochin, T.: Recognition and intensity estimation of facial expression using ensemble classifiers. Int. J. Network. Distrib. Comput. 4, 203\u2013211 (2016). https:\/\/doi.org\/10.2991\/ijndc.2016.4.4.1","journal-title":"Int. J. Network. Distrib. Comput."},{"issue":"3","key":"1219_CR14","doi-asserted-by":"publisher","first-page":"817","DOI":"10.1109\/TCYB.2015.2416317","volume":"46","author":"MR Mohammadi","year":"2016","unstructured":"Mohammadi, M.R., Fatemizadeh, E., Mahoor, M.H.: Intensity estimation of spontaneous facial action units based on their sparsity properties. IEEE Trans. Cybern. 46(3), 817\u2013826 (2016). https:\/\/doi.org\/10.1109\/TCYB.2015.2416317","journal-title":"IEEE Trans. Cybern."},{"key":"1219_CR15","doi-asserted-by":"publisher","unstructured":"Zhang, Y., Wu, B., Dong, W., Li, Z., Liu, W., Hu, B.-G., Ji, Q.: Joint representation and estimator learning for facial action unit intensity estimation. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3452\u20133461 (2019). https:\/\/doi.org\/10.1109\/CVPR.2019.00357","DOI":"10.1109\/CVPR.2019.00357"},{"key":"1219_CR16","doi-asserted-by":"publisher","unstructured":"Lu, G., Zhang, W.: Happiness intensity estimation for a group of people in images using convolutional neural networks. 
In: 2019 3rd International Conference on Electronic Information Technology and Computer Engineering (EITCE), pp. 1707\u20131710 (2019). https:\/\/doi.org\/10.1109\/EITCE47263.2019.9094832","DOI":"10.1109\/EITCE47263.2019.9094832"},{"key":"1219_CR17","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-019-01191-3","author":"M Tavakolian","year":"2019","unstructured":"Tavakolian, M., Hadid, A.: A spatiotemporal convolutional neural network for automatic pain intensity estimation from facial dynamics. Int. J. Comput. Vision (2019). https:\/\/doi.org\/10.1007\/s11263-019-01191-3","journal-title":"Int. J. Comput. Vision"},{"issue":"1","key":"1219_CR18","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1109\/TAFFC.2021.3061605","volume":"14","author":"I Ntinou","year":"2023","unstructured":"Ntinou, I., Sanchez, E., Bulat, A., Valstar, M., Tzimiropoulos, G.: A transfer learning approach to heatmap regression for action unit intensity estimation. IEEE Trans. Affect. Comput. 14(1), 436\u2013450 (2023). https:\/\/doi.org\/10.1109\/TAFFC.2021.3061605","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"07","key":"1219_CR19","first-page":"12701","volume":"34","author":"Y Fan","year":"2020","unstructured":"Fan, Y., Lam, J., Li, V.: Facial action unit intensity estimation via semantic correspondence learning with dynamic graph convolution. Proc. AAAI Conf. Artif. Intell. 34(07), 12701\u201312708 (2020)","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"1219_CR20","doi-asserted-by":"publisher","unstructured":"Batista, J.C., Albiero, V., Bellon, O.R.P., Silva, L.: Aumpnet: Simultaneous action units detection and intensity estimation on multipose facial images using a single convolutional neural network. In: 2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017), pp. 866\u2013871 (2017). 
https:\/\/doi.org\/10.1109\/FG.2017.111","DOI":"10.1109\/FG.2017.111"},{"key":"1219_CR21","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1016\/j.patrec.2020.09.012","volume":"140","author":"M Tavakolian","year":"2020","unstructured":"Tavakolian, M., Bordallo Lopez, M., Liu, L.: Self-supervised pain intensity estimation from facial videos via statistical spatiotemporal distillation. Pattern Recogn. Lett. 140, 26\u201333 (2020). https:\/\/doi.org\/10.1016\/j.patrec.2020.09.012","journal-title":"Pattern Recogn. Lett."},{"key":"1219_CR22","doi-asserted-by":"publisher","unstructured":"Song, X., Shi, T., Feng, Z., Song, M., Lin, J., Lin, C., Fan, C., Yuan, Y.: Unsupervised learning facial parameter regressor for action unit intensity estimation via differentiable renderer. In: Proceedings of the 28th ACM International Conference on Multimedia (2020). https:\/\/doi.org\/10.1145\/3394171.3413955","DOI":"10.1145\/3394171.3413955"},{"key":"1219_CR23","doi-asserted-by":"publisher","unstructured":"Zhao, R., Gan, Q., Wang, S., Ji, Q.: Facial expression intensity estimation using ordinal information. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3466\u20133474 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.377","DOI":"10.1109\/CVPR.2016.377"},{"key":"1219_CR24","doi-asserted-by":"publisher","unstructured":"Zhang, Y., Dong, W., Hu, B.-G., Ji, Q.: Weakly-supervised deep convolutional neural network learning for facial action unit intensity estimation. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2314\u20132323 (2018). https:\/\/doi.org\/10.1109\/CVPR.2018.00246","DOI":"10.1109\/CVPR.2018.00246"},{"issue":"7","key":"1219_CR25","doi-asserted-by":"publisher","first-page":"1734","DOI":"10.1109\/TKDE.2016.2545658","volume":"28","author":"X Geng","year":"2016","unstructured":"Geng, X.: Label distribution learning. IEEE Trans. Knowl. Data Eng. 28(7), 1734\u20131748 (2016). 
https:\/\/doi.org\/10.1109\/TKDE.2016.2545658","journal-title":"IEEE Trans. Knowl. Data Eng."},{"issue":"6","key":"1219_CR26","doi-asserted-by":"publisher","first-page":"2825","DOI":"10.1109\/TIP.2017.2689998","volume":"26","author":"B-B Gao","year":"2017","unstructured":"Gao, B.-B., Xing, C., Xie, C.-W., Wu, J., Geng, X.: Deep label distribution learning with label ambiguity. IEEE Trans. Image Process. 26(6), 2825\u20132838 (2017). https:\/\/doi.org\/10.1109\/TIP.2017.2689998","journal-title":"IEEE Trans. Image Process."},{"key":"1219_CR27","doi-asserted-by":"publisher","unstructured":"D\u00edaz, R., Marathe, A.: Soft labels for ordinal regression. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4733\u20134742 (2019). https:\/\/doi.org\/10.1109\/CVPR.2019.00487","DOI":"10.1109\/CVPR.2019.00487"},{"key":"1219_CR28","doi-asserted-by":"publisher","unstructured":"Lucey, P., Cohn, J.F., Kanade, T., Saragih, J., Ambadar, Z., Matthews, I.: The extended cohn-kanade dataset (ck+): A complete dataset for action unit and emotion-specified expression. In: 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops, pp. 94\u2013101 (2010). https:\/\/doi.org\/10.1109\/CVPRW.2010.5543262","DOI":"10.1109\/CVPRW.2010.5543262"},{"key":"1219_CR29","doi-asserted-by":"publisher","unstructured":"Zhang, X., Yin, L., Cohn, J.F., Canavan, S., Reale, M., Horowitz, A., Liu, P.: A high-resolution spontaneous 3d dynamic facial expression database. In: 2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), pp. 1\u20136 (2013). https:\/\/doi.org\/10.1109\/FG.2013.6553788","DOI":"10.1109\/FG.2013.6553788"},{"key":"1219_CR30","doi-asserted-by":"publisher","unstructured":"Lucey, P., Cohn, J.F., Prkachin, K.M., Solomon, P.E., Matthews, I.: Painful data: The unbc-mcmaster shoulder pain expression archive database. 
In: 2011 IEEE International Conference on Automatic Face & Gesture Recognition (FG), pp. 57\u201364 (2011). https:\/\/doi.org\/10.1109\/FG.2011.5771462","DOI":"10.1109\/FG.2011.5771462"},{"issue":"2","key":"1219_CR31","doi-asserted-by":"publisher","first-page":"267","DOI":"10.1016\/j.pain.2008.04.010","volume":"139","author":"KM Prkachin","year":"2008","unstructured":"Prkachin, K.M., Solomon, P.E.: The structure, reliability and validity of pain expression: evidence from patients with shoulder pain. Pain 139(2), 267\u2013274 (2008). https:\/\/doi.org\/10.1016\/j.pain.2008.04.010","journal-title":"Pain"},{"issue":"2","key":"1219_CR32","doi-asserted-by":"publisher","first-page":"420","DOI":"10.1037\/0033-2909.86.2.420","volume":"86","author":"PE Shrout","year":"1979","unstructured":"Shrout, P.E., Fleiss, J.L.: Intraclass correlations: uses in assessing rater reliability. Psychol. Bull. 86(2), 420\u20138 (1979)","journal-title":"Psychol. Bull."},{"key":"1219_CR33","unstructured":"Drucker, H., Burges, C.J.C., Kaufman, L., Smola, A., Vapnik, V.: Support vector regression machines. In: Proceedings of the 9th International Conference on Neural Information Processing Systems, pp. 155\u2013161 (1996)"},{"key":"1219_CR34","doi-asserted-by":"publisher","unstructured":"Chu, W., Keerthi, S.S.: New approaches to support vector ordinal regression. In: Proceedings of the 22nd International Conference on Machine Learning, pp. 145\u2013152 (2005). https:\/\/doi.org\/10.1145\/1102351.1102370","DOI":"10.1145\/1102351.1102370"},{"key":"1219_CR35","doi-asserted-by":"crossref","unstructured":"Xu, R., Han, J., Chen, J.: Ordinal information based facial expression intensity estimation for emotional interaction: a novel semi-supervised deep learning approach. 
Computing 1\u201318 (2022)","DOI":"10.1007\/s00607-022-01140-y"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01219-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-023-01219-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01219-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,17]],"date-time":"2024-02-17T11:32:05Z","timestamp":1708169525000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-023-01219-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1,16]]},"references-count":35,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2024,2]]}},"alternative-id":["1219"],"URL":"https:\/\/doi.org\/10.1007\/s00530-023-01219-2","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,1,16]]},"assertion":[{"value":"11 May 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 December 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 January 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of 
interest"}}],"article-number":"13"}}