{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,7,3]],"date-time":"2024-07-03T00:24:16Z","timestamp":1719966256329},"publisher-location":"Cham","reference-count":43,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031585340","type":"print"},{"value":"9783031585357","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-58535-7_8","type":"book-chapter","created":{"date-parts":[[2024,7,2]],"date-time":"2024-07-02T17:01:50Z","timestamp":1719939710000},"page":"90-101","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Federated Scaling of\u00a0Pre-trained Models for\u00a0Deep Facial Expression Recognition"],"prefix":"10.1007","author":[{"ORCID":"http:\/\/orcid.org\/0009-0002-5426-4071","authenticated-orcid":false,"given":"P. V. N. Pooja","family":"Srihitha","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-9772-240X","authenticated-orcid":false,"given":"Mridula","family":"Verma","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-5560-7649","authenticated-orcid":false,"given":"Munaga V. N. K.","family":"Prasad","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,7,3]]},"reference":[{"key":"8_CR1","unstructured":"Alom, M.Z., et al.: The history began from alexnet: a comprehensive survey on deep learning approaches. arXiv preprint arXiv:1803.01164 (2018)"},{"key":"8_CR2","series-title":"Advances in Intelligent Systems and Computing","doi-asserted-by":"publisher","first-page":"459","DOI":"10.1007\/978-981-16-5207-3_38","volume-title":"Proceedings of International Conference on Advanced Computing Applications","author":"S Bandyopadhyay","year":"2022","unstructured":"Bandyopadhyay, S., Thakur, S.S., Mandal, J.K.: Online recommendation system using human facial expression based emotion detection: a proposed method. In: Mandal, J.K., Buyya, R., De, D. (eds.) Proceedings of International Conference on Advanced Computing Applications. AISC, vol. 1406, pp. 459\u2013468. Springer, Singapore (2022). https:\/\/doi.org\/10.1007\/978-981-16-5207-3_38"},{"key":"8_CR3","first-page":"374","volume":"1","author":"K Bonawitz","year":"2019","unstructured":"Bonawitz, K., et al.: Towards federated learning at scale: system design. Proc. Mach. Learn. Syst. 1, 374\u2013388 (2019)","journal-title":"Proc. Mach. Learn. Syst."},{"key":"8_CR4","doi-asserted-by":"crossref","unstructured":"Chen, F., Long, G., Wu, Z., Zhou, T., Jiang, J.: Personalized federated learning with graph. arXiv preprint arXiv:2203.00829 (2022)","DOI":"10.24963\/ijcai.2022\/357"},{"key":"8_CR5","unstructured":"Chen, H.Y., Tu, C.H., Li, Z., Shen, H.W., Chao, W.L.: On pre-training for federated learning. 
arXiv preprint arXiv:2206.11488 (2022)"},{"key":"8_CR6","unstructured":"Chen, J., Xu, W., Guo, S., Wang, J., Zhang, J., Wang, H.: Fedtune: a deep dive into efficient federated fine-tuning with pre-trained transformers. arXiv preprint arXiv:2211.08025 (2022)"},{"key":"8_CR7","doi-asserted-by":"crossref","unstructured":"Chollet, F.: Xception: Deep learning with depthwise separable convolutions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1251\u20131258 (2017)","DOI":"10.1109\/CVPR.2017.195"},{"key":"8_CR8","doi-asserted-by":"publisher","first-page":"9848","DOI":"10.1109\/ACCESS.2019.2891668","volume":"7","author":"J Deng","year":"2019","unstructured":"Deng, J., Pang, G., Zhang, Z., Pang, Z., Yang, H., Yang, G.: CGAN based facial expression recognition for human-robot interaction. IEEE Access 7, 9848\u20139859 (2019)","journal-title":"IEEE Access"},{"key":"8_CR9","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"issue":"8","key":"8_CR10","doi-asserted-by":"publisher","first-page":"11365","DOI":"10.1007\/s11042-022-13558-9","volume":"82","author":"S Gupta","year":"2023","unstructured":"Gupta, S., Kumar, P., Tekchandani, R.K.: Facial emotion recognition based real-time learner engagement detection system in online learning context using deep learning models. Multimedia Tools Appl. 82(8), 11365\u201311394 (2023)","journal-title":"Multimedia Tools Appl."},{"key":"8_CR11","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"8_CR12","doi-asserted-by":"publisher","first-page":"35","DOI":"10.1016\/j.ins.2021.08.043","volume":"580","author":"Q Huang","year":"2021","unstructured":"Huang, Q., Huang, C., Wang, X., Jiang, F.: Facial expression recognition with grid-wise attention and visual transformer. Inf. Sci. 580, 35\u201354 (2021)","journal-title":"Inf. Sci."},{"key":"8_CR13","doi-asserted-by":"publisher","first-page":"1005","DOI":"10.1109\/TCE.2023.3263672","volume":"69","author":"X Ji","year":"2023","unstructured":"Ji, X., Dong, Z., Han, Y., Lai, C.S., Zhou, G., Qi, D.: EMSN: an energy-efficient memristive sequencer network for human emotion classification in mental health monitoring. IEEE Trans. Consum. Electron. 69, 1005\u20131016 (2023)","journal-title":"IEEE Trans. Consum. Electron."},{"key":"8_CR14","doi-asserted-by":"publisher","unstructured":"Kahou, S.E., et al.: Emonets: multimodal deep learning approaches for emotion recognition in video. J. Multimodal User Interfaces 10 (2015). https:\/\/doi.org\/10.1007\/s12193-015-0195-2","DOI":"10.1007\/s12193-015-0195-2"},{"issue":"23","key":"8_CR15","doi-asserted-by":"publisher","first-page":"1326","DOI":"10.1049\/el.2018.6932","volume":"54","author":"T Kim","year":"2018","unstructured":"Kim, T., Yu, C., Lee, S.: Facial expression recognition using feature additive pooling and progressive fine-tuning of CNN. Electron. Lett. 54(23), 1326\u20131328 (2018)","journal-title":"Electron. Lett."},{"key":"8_CR16","doi-asserted-by":"crossref","unstructured":"Knyazev, B., Shvetsov, R., Efremova, N., Kuharenko, A.: Convolutional neural networks pretrained on large face recognition datasets for emotion classification from video. 
arXiv preprint arXiv:1711.04598 (2017)","DOI":"10.1109\/FG.2018.00109"},{"key":"8_CR17","unstructured":"Kone\u010dn\u1ef3, J., McMahan, H.B., Ramage, D., Richt\u00e1rik, P.: Federated optimization: distributed machine learning for on-device intelligence. arXiv preprint arXiv:1610.02527 (2016)"},{"key":"8_CR18","doi-asserted-by":"publisher","unstructured":"Li, L., Fan, Y., Tse, M., Lin, K.Y.: A review of applications in federated learning. Comput. Ind. Eng. 149, 106854 (2020). https:\/\/doi.org\/10.1016\/j.cie.2020.106854, https:\/\/www.sciencedirect.com\/science\/article\/pii\/S0360835220305532","DOI":"10.1016\/j.cie.2020.106854"},{"key":"8_CR19","doi-asserted-by":"publisher","first-page":"1195","DOI":"10.1109\/TAFFC.2020.2981446","volume":"13","author":"S Li","year":"2020","unstructured":"Li, S., Deng, W.: Deep facial expression recognition: a survey. IEEE Trans. Affect. Comput. 13, 1195\u20131215 (2020)","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"3","key":"8_CR20","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1109\/MSP.2020.2975749","volume":"37","author":"T Li","year":"2020","unstructured":"Li, T., Sahu, A.K., Talwalkar, A., Smith, V.: Federated learning: challenges, methods, and future directions. IEEE Signal Process. Mag. 37(3), 50\u201360 (2020)","journal-title":"IEEE Signal Process. Mag."},{"key":"8_CR21","doi-asserted-by":"publisher","first-page":"102723","DOI":"10.1016\/j.jvcir.2019.102723","volume":"71","author":"Z Liu","year":"2020","unstructured":"Liu, Z., Peng, Y., Hu, W.: Driver fatigue detection based on deeply-learned facial expression representation. J. Vis. Commun. Image Represent. 71, 102723 (2020)","journal-title":"J. Vis. Commun. Image Represent."},{"key":"8_CR22","doi-asserted-by":"crossref","unstructured":"Luo, C., Fan, X., Yan, Y., Jin, H., Wang, X.: Optimization of three-dimensional face recognition algorithms in financial identity authentication. Int. J. Comput. Commun. Control 17(3) (2022)","DOI":"10.15837\/ijccc.2022.3.3744"},{"key":"8_CR23","unstructured":"Ma, F., Sun, B., Li, S.: Robust facial expression recognition with convolutional visual transformers. arXiv preprint arXiv:2103.16854 (2021)"},{"issue":"5","key":"8_CR24","doi-asserted-by":"publisher","first-page":"850","DOI":"10.1049\/iet-ipr.2018.5683","volume":"13","author":"M Mandal","year":"2019","unstructured":"Mandal, M., Verma, M., Mathur, S., Vipparthi, S.K., Murala, S., Kranthi Kumar, D.: Regional adaptive affinitive patterns (RADAP) with logical operators for facial expression recognition. IET Image Proc. 13(5), 850\u2013861 (2019)","journal-title":"IET Image Proc."},{"key":"8_CR25","unstructured":"McMahan, B., Moore, E., Ramage, D., Hampson, S., Arcas, B.A.V.: Communication-efficient learning of deep networks from decentralized data. In: Singh, A., Zhu, J. (eds.) Proceedings of the 20th International Conference on Artificial Intelligence and Statistics. Proceedings of Machine Learning Research, vol.\u00a054, pp. 1273\u20131282. PMLR, 20\u201322 April 2017. https:\/\/proceedings.mlr.press\/v54\/mcmahan17a.html"},{"key":"8_CR26","doi-asserted-by":"publisher","first-page":"1640","DOI":"10.1016\/j.procs.2023.01.142","volume":"218","author":"G Meena","year":"2023","unstructured":"Meena, G., Mohbey, K.K.: Sentiment analysis on images using different transfer learning models. Procedia Comput. Sci. 218, 1640\u20131649 (2023)","journal-title":"Procedia Comput. 
Sci."},{"key":"8_CR27","unstructured":"Meng, Q., Zhou, F., Ren, H., Feng, T., Liu, G., Lin, Y.: Improving federated learning face recognition via privacy-agnostic clusters. arXiv preprint arXiv:2201.12467 (2022)"},{"key":"8_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2020.3031835","volume":"70","author":"K Mohan","year":"2020","unstructured":"Mohan, K., Seal, A., Krejcar, O., Yazidi, A.: Facial expression recognition using local gravitational force descriptor-based deep convolution neural networks. IEEE Trans. Instrum. Meas. 70, 1\u201312 (2020)","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"8_CR29","unstructured":"Nguyen, J., Malik, K., Sanjabi, M., Rabbat, M.: Where to begin? exploring the impact of pre-training and initialization in federated learning. arXiv preprint arXiv:2206.15387 (2022)"},{"key":"8_CR30","series-title":"Advances in Intelligent Systems and Computing","doi-asserted-by":"publisher","first-page":"585","DOI":"10.1007\/978-3-030-72657-7_56","volume-title":"Trends and Applications in Information Systems and Technologies","author":"R P\u00e1vez","year":"2021","unstructured":"P\u00e1vez, R., D\u00edaz, J., Arango-L\u00f3pez, J., Ahumada, D., M\u00e9ndez, C., Moreira, F.: Emotion recognition in children with autism spectrum disorder using convolutional neural networks. In: Rocha, \u00c1., Adeli, H., Dzemyda, G., Moreira, F., Ramalho Correia, A.M. (eds.) WorldCIST 2021. AISC, vol. 1365, pp. 585\u2013595. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-72657-7_56"},{"issue":"11","key":"8_CR31","doi-asserted-by":"publisher","first-page":"7665","DOI":"10.1109\/TII.2022.3145862","volume":"18","author":"MD Putro","year":"2022","unstructured":"Putro, M.D., Nguyen, D.L., Jo, K.H.: A fast CPU real-time facial expression detector using sequential attention network for human-robot interaction. IEEE Trans. Industr. Inf. 18(11), 7665\u20137674 (2022)","journal-title":"IEEE Trans. Industr. Inf."},{"key":"8_CR32","doi-asserted-by":"crossref","unstructured":"Salman, A., Busso, C.: Privacy preserving personalization for video facial expression recognition using federated learning. In: Proceedings of the 2022 International Conference on Multimodal Interaction, pp. 495\u2013503 (2022)","DOI":"10.1145\/3536221.3556614"},{"key":"8_CR33","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: Mobilenetv2: inverted residuals and linear bottlenecks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520 (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"8_CR34","doi-asserted-by":"publisher","first-page":"16493","DOI":"10.1109\/ACCESS.2023.3243850","volume":"11","author":"T Shahzad","year":"2023","unstructured":"Shahzad, T., Iqbal, K., Khan, M.A., Iqbal, N., et al.: Role of zoning in facial expression using deep learning. IEEE Access 11, 16493\u201316508 (2023)","journal-title":"IEEE Access"},{"key":"8_CR35","unstructured":"Shao, R., Perera, P., Yuen, P.C., Patel, V.M.: Federated face presentation attack detection. arXiv preprint arXiv:2005.14638 (2020)"},{"key":"8_CR36","doi-asserted-by":"publisher","first-page":"36961","DOI":"10.1109\/ACCESS.2023.3264268","volume":"11","author":"D Shehada","year":"2023","unstructured":"Shehada, D., Turky, A., Khan, W., Khan, B., Hussain, A.: A lightweight facial emotion recognition system using partial transfer learning for visually impaired people. 
IEEE Access 11, 36961\u201336969 (2023)","journal-title":"IEEE Access"},{"key":"8_CR37","doi-asserted-by":"crossref","unstructured":"Shome, D., Kar, T.: Fedaffect: few-shot federated learning for facial expression recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4168\u20134175 (2021)","DOI":"10.1109\/ICCVW54120.2021.00463"},{"key":"8_CR38","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"8_CR39","unstructured":"Sun, G., Mendieta, M., Yang, T., Chen, C.: Exploring parameter-efficient fine-tuning for improving communication efficiency in federated learning. arXiv preprint arXiv:2210.01708 (2022)"},{"key":"8_CR40","doi-asserted-by":"publisher","first-page":"11823","DOI":"10.1109\/TII.2023.3253188","volume":"19","author":"M Sun","year":"2023","unstructured":"Sun, M., et al.: Attention-rectified and texture-enhanced cross-attention transformer feature fusion network for facial expression recognition. IEEE Trans. Ind. Inf. 19, 11823\u201311832 (2023)","journal-title":"IEEE Trans. Ind. Inf."},{"key":"8_CR41","doi-asserted-by":"crossref","unstructured":"Weller, O., Marone, M., Braverman, V., Lawrie, D., Van\u00a0Durme, B.: Pretrained models for multilingual federated learning. arXiv preprint arXiv:2206.02291 (2022)","DOI":"10.18653\/v1\/2022.naacl-main.101"},{"key":"8_CR42","doi-asserted-by":"publisher","first-page":"64487","DOI":"10.1109\/ACCESS.2021.3075389","volume":"9","author":"H Zang","year":"2021","unstructured":"Zang, H., Foo, S.Y., Bernadin, S., Meyer-Baese, A.: Facial emotion recognition using asymmetric pyramidal networks with gradient centralization. IEEE Access 9, 64487\u201364498 (2021)","journal-title":"IEEE Access"},{"key":"8_CR43","doi-asserted-by":"crossref","unstructured":"Zhang, L., Shen, L., Ding, L., Tao, D., Duan, L.Y.: Fine-tuning global model via data-free knowledge distillation for non-iid federated learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
10174\u201310183 (2022)","DOI":"10.1109\/CVPR52688.2022.00993"}],"container-title":["Communications in Computer and Information Science","Computer Vision and Image Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-58535-7_8","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,2]],"date-time":"2024-07-02T17:03:17Z","timestamp":1719939797000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-58535-7_8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031585340","9783031585357"],"references-count":43,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-58535-7_8","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"value":"1865-0929","type":"print"},{"value":"1865-0937","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"3 July 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CVIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Computer Vision and Image Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Jammu","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"3 November 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 November 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"cvip2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/iitjammu.ac.in\/cvip2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Online CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"461","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"140","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"30% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}