{"id":"https://openalex.org/W4372270299","doi":"https://doi.org/10.1109/icassp49357.2023.10094895","title":"Fast Yet Effective Speech Emotion Recognition with Self-Distillation","display_name":"Fast Yet Effective Speech Emotion Recognition with Self-Distillation","publication_year":2023,"publication_date":"2023-05-05","ids":{"openalex":"https://openalex.org/W4372270299","doi":"https://doi.org/10.1109/icassp49357.2023.10094895"},"language":"en","primary_location":{"is_oa":true,"landing_page_url":"https://doi.org/10.1109/icassp49357.2023.10094895","pdf_url":null,"source":{"id":"https://openalex.org/S4363607702","display_name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":null,"host_organization_name":null,"host_organization_lineage":[],"host_organization_lineage_names":[],"type":"conference"},"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true},"type":"article","type_crossref":"proceedings-article","indexed_in":["crossref"],"open_access":{"is_oa":true,"oa_status":"green","oa_url":"https://doi.org/10.1109/icassp49357.2023.10094895","any_repository_has_fulltext":true},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5063262277","display_name":"Zhao Ren","orcid":"https://orcid.org/0000-0003-0707-5016"},"institutions":[{"id":"https://openalex.org/I4210136150","display_name":"L3S Research Center","ror":"https://ror.org/039t4wk02","country_code":"DE","type":"facility","lineage":["https://openalex.org/I114112103","https://openalex.org/I4210136150","https://openalex.org/I94509681"]},{"id":"https://openalex.org/I114112103","display_name":"Leibniz University Hannover","ror":"https://ror.org/0304hq317","country_code":"DE","type":"education","lineage":["https://openalex.org/I114112103"]}],"countries":["DE"],"is_corresponding":false,"raw_author_name":"Zhao Ren","raw_affiliation_strings":["L3S Research Center, Leibniz University Hannover, Germany"],"affiliations":[{"raw_affiliation_string":"L3S Research Center, Leibniz University Hannover, Germany","institution_ids":["https://openalex.org/I4210136150","https://openalex.org/I114112103"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5101918597","display_name":"Th\u00e0nh T\u00e2m Nguy\u00ean","orcid":"https://orcid.org/0000-0002-2586-7757"},"institutions":[{"id":"https://openalex.org/I11701301","display_name":"Griffith University","ror":"https://ror.org/02sc3r913","country_code":"AU","type":"education","lineage":["https://openalex.org/I11701301"]}],"countries":["AU"],"is_corresponding":false,"raw_author_name":"Thanh Tam Nguyen","raw_affiliation_strings":["Griffith University, Australia"],"affiliations":[{"raw_affiliation_string":"Griffith University, Australia","institution_ids":["https://openalex.org/I11701301"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5007780657","display_name":"Yi Hua Chang","orcid":"https://orcid.org/0000-0003-0112-4139"},"institutions":[{"id":"https://openalex.org/I47508984","display_name":"Imperial College London","ror":"https://ror.org/041kmwe10","country_code":"GB","type":"education","lineage":["https://openalex.org/I47508984"]}],"countries":["GB"],"is_corresponding":false,"raw_author_name":"Yi Chang","raw_affiliation_strings":["Audio, & Music, Imperial College London,GLAM – Group on Language,United 
Kingdom"],"affiliations":[{"raw_affiliation_string":"Audio, & Music, Imperial College London,GLAM – Group on Language,United Kingdom","institution_ids":["https://openalex.org/I47508984"]}]},{"author_position":"last","author":{"id":"https://openalex.org/A5043060302","display_name":"Bj\u00f6rn Sch\u00fcller","orcid":"https://orcid.org/0000-0002-6478-8699"},"institutions":[{"id":"https://openalex.org/I47508984","display_name":"Imperial College London","ror":"https://ror.org/041kmwe10","country_code":"GB","type":"education","lineage":["https://openalex.org/I47508984"]},{"id":"https://openalex.org/I179225836","display_name":"University of Augsburg","ror":"https://ror.org/03p14d497","country_code":"DE","type":"education","lineage":["https://openalex.org/I179225836"]}],"countries":["DE","GB"],"is_corresponding":false,"raw_author_name":"Bj\u00f6rn W. Schuller","raw_affiliation_strings":["Audio, & Music, Imperial College London,GLAM – Group on Language,United Kingdom","Chair of Embedded Intelligence for Health Care and Wellbeing, University of Augsburg, Germany"],"affiliations":[{"raw_affiliation_string":"Audio, & Music, Imperial College London,GLAM – Group on Language,United Kingdom","institution_ids":["https://openalex.org/I47508984"]},{"raw_affiliation_string":"Chair of Embedded Intelligence for Health Care and Wellbeing, University of Augsburg, Germany","institution_ids":["https://openalex.org/I179225836"]}]}],"institution_assertions":[],"countries_distinct_count":3,"institutions_distinct_count":5,"corresponding_author_ids":[],"corresponding_institution_ids":[],"apc_list":null,"apc_paid":null,"fwci":0.769,"has_fulltext":false,"cited_by_count":2,"citation_normalized_percentile":{"value":0.998274,"is_in_top_1_percent":true,"is_in_top_10_percent":true},"cited_by_percentile_year":{"min":78,"max":84},"biblio":{"volume":null,"issue":null,"first_page":"1","last_page":"5"},"is_retracted":false,"is_paratext":false,"primary_topic":{"id":"https://openalex.org/T10860","display_name":"Speech and Audio Processing","score":0.9998,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},"topics":[{"id":"https://openalex.org/T10860","display_name":"Speech and Audio Processing","score":0.9998,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T10201","display_name":"Speech Recognition and Synthesis","score":0.9997,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T10667","display_name":"Emotion and Mood Recognition","score":0.9994,"subfield":{"id":"https://openalex.org/subfields/3205","display_name":"Experimental and Cognitive Psychology"},"field":{"id":"https://openalex.org/fields/32","display_name":"Psychology"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}}],"keywords":[],"concepts":[{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer 
science","level":0,"score":0.67672837},{"id":"https://openalex.org/C28490314","wikidata":"https://www.wikidata.org/wiki/Q189436","display_name":"Speech recognition","level":1,"score":0.658092},{"id":"https://openalex.org/C204030448","wikidata":"https://www.wikidata.org/wiki/Q101017","display_name":"Distillation","level":2,"score":0.5818932},{"id":"https://openalex.org/C2777438025","wikidata":"https://www.wikidata.org/wiki/Q1339090","display_name":"Emotion recognition","level":2,"score":0.5529669},{"id":"https://openalex.org/C185592680","wikidata":"https://www.wikidata.org/wiki/Q2329","display_name":"Chemistry","level":0,"score":0.09746346},{"id":"https://openalex.org/C178790620","wikidata":"https://www.wikidata.org/wiki/Q11351","display_name":"Organic chemistry","level":1,"score":0.0}],"mesh":[],"locations_count":2,"locations":[{"is_oa":true,"landing_page_url":"https://doi.org/10.1109/icassp49357.2023.10094895","pdf_url":null,"source":{"id":"https://openalex.org/S4363607702","display_name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":null,"host_organization_name":null,"host_organization_lineage":[],"host_organization_lineage_names":[],"type":"conference"},"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true},{"is_oa":true,"landing_page_url":"http://arxiv.org/abs/2210.14636","pdf_url":"http://arxiv.org/pdf/2210.14636","source":{"id":"https://openalex.org/S4306400194","display_name":"arXiv (Cornell University)","issn_l":null,"issn":null,"is_oa":true,"is_in_doaj":false,"is_core":false,"host_organization":"https://openalex.org/I205783295","host_organization_name":"Cornell University","host_organization_lineage":["https://openalex.org/I205783295"],"host_organization_lineage_names":["Cornell University"],"type":"repository"},"license":null,"license_id":null,"version":"submittedVersion","is_accepted":false,"is_published":false}],"best_oa_location":{"is_oa":true,"landing_page_url":"https://doi.org/10.1109/icassp49357.2023.10094895","pdf_url":null,"source":{"id":"https://openalex.org/S4363607702","display_name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":null,"host_organization_name":null,"host_organization_lineage":[],"host_organization_lineage_names":[],"type":"conference"},"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true},"sustainable_development_goals":[{"display_name":"Decent work and economic growth","id":"https://metadata.un.org/sdg/8","score":0.43}],"grants":[{"funder":"https://openalex.org/F4320311649","funder_display_name":"Ministry of 
Education","award_id":null}],"datasets":[],"versions":[],"referenced_works_count":23,"referenced_works":["https://openalex.org/W1494198834","https://openalex.org/W2074788634","https://openalex.org/W2803098682","https://openalex.org/W2915760784","https://openalex.org/W2969889150","https://openalex.org/W2973049979","https://openalex.org/W2987861506","https://openalex.org/W3015832261","https://openalex.org/W3092074970","https://openalex.org/W3094790722","https://openalex.org/W3137609883","https://openalex.org/W3139270985","https://openalex.org/W3164605550","https://openalex.org/W3186192207","https://openalex.org/W3197642003","https://openalex.org/W3202370288","https://openalex.org/W3203140070","https://openalex.org/W3206996142","https://openalex.org/W3216586892","https://openalex.org/W4283215442","https://openalex.org/W4285733490","https://openalex.org/W4361994820","https://openalex.org/W4375869379"],"related_works":["https://openalex.org/W4396701345","https://openalex.org/W4391913857","https://openalex.org/W4391375266","https://openalex.org/W3126677997","https://openalex.org/W2899084033","https://openalex.org/W2748952813","https://openalex.org/W2390279801","https://openalex.org/W2376932109","https://openalex.org/W2358668433","https://openalex.org/W1610857240"],"abstract_inverted_index":{"Speech":[0],"emotion":[1],"recognition":[2],"(SER)":[3],"is":[4,15,100],"the":[5,36,45,127,133,138,146,188],"task":[6],"of":[7,39,47,106,116,120,129,141,196],"recognising":[8],"humans'":[9],"emotional":[10],"states":[11],"from":[12,44,178],"speech.":[13],"SER":[14,41,70,99,152],"extremely":[16],"prevalent":[17],"in":[18,85],"helping":[19],"dialogue":[20],"systems":[21],"to":[22,35,69,183],"truly":[23],"understand":[24],"our":[25,121],"emotions":[26],"and":[27,83,112,144,187],"become":[28],"a":[29,95,104,109,171],"trustworthy":[30],"human":[31],"conversational":[32],"partner.":[33],"Due":[34],"lengthy":[37],"nature":[38],"speech,":[40],"also":[42],"suffers":[43],"lack":[46],"abundant":[48],"labelled":[49],"data":[50,192],"for":[51,180],"powerful":[52,156],"models":[53,60,77,157],"like":[54],"deep":[55],"neural":[56],"networks.":[57],"Pre-trained":[58],"complex":[59,76],"on":[61,150,166,191],"large-scale":[62],"speech":[63,142],"datasets":[64],"have":[65],"been":[66],"successfully":[67],"applied":[68],"via":[71],"transfer":[72],"learning.":[73],"However,":[74],"fine-tuning":[75,108,173],"still":[78],"requires":[79],"large":[80],"memory":[81],"space":[82],"results":[84],"low":[86],"inference":[87],"efficiency.":[88],"In":[89],"this":[90],"paper,":[91],"we":[92],"argue":[93],"achieving":[94],"fast":[96],"yet":[97],"effective":[98],"possible":[101],"with":[102,193],"self-distillation,":[103],"method":[105,131],"simultaneously":[107],"pretrained":[110],"model":[111],"training":[113,177],"shallower":[114],"versions":[115],"itself.":[117],"The":[118],"benefits":[119],"self-distillation":[122,130,181],"framework":[123],"are":[124],"threefold:":[125],"(1)":[126],"adoption":[128],"upon":[132],"acoustic":[134],"modality":[135],"breaks":[136],"through":[137],"limited":[139],"ground-truth":[140],"data,":[143],"outperforms":[145],"existing":[147],"models'":[148],"performance":[149],"an":[151],"dataset;":[153],"(2)":[154],"executing":[155],"at":[158],"different":[159],"depths":[160],"can":[161],"achieve":[162],"adaptive":[163],"accuracy-efficiency":[164],"trade-offs":[165],"resource-limited":[167],"edge":[168],"devices;":[169],"(3)":[170],"new":[172],"process":[174],"rather":[175],"than":[176],"scratch":[179],"leads":[182],"
faster":[184],"learning":[185],"time":[186],"state-of-the-art":[189],"accuracy":[190],"small":[194],"quantities":[195],"label":[197],"information.":[198]},"cited_by_api_url":"https://api.openalex.org/works?filter=cites:W4372270299","counts_by_year":[{"year":2024,"cited_by_count":1},{"year":2023,"cited_by_count":1}],"updated_date":"2024-12-31T05:11:42.008491","created_date":"2023-05-07"}