{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,13]],"date-time":"2024-09-13T11:38:07Z","timestamp":1726227487185},"publisher-location":"Cham","reference-count":18,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031404979"},{"type":"electronic","value":"9783031404986"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-40498-6_26","type":"book-chapter","created":{"date-parts":[[2023,8,22]],"date-time":"2023-08-22T19:02:34Z","timestamp":1692730954000},"page":"293-303","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Voice Cloning for\u00a0Voice Disorders: Impact of\u00a0Phonetic Content"],"prefix":"10.1007","author":[{"given":"Lily","family":"Wadoux","sequence":"first","affiliation":[]},{"given":"Nelly","family":"Barbot","sequence":"additional","affiliation":[]},{"given":"Jonathan","family":"Chevelu","sequence":"additional","affiliation":[]},{"given":"Damien","family":"Lolive","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,23]]},"reference":[{"key":"26_CR1","doi-asserted-by":"publisher","unstructured":"Andreev, P., Alanov, A., Ivanov, O., Vetrov, D.: HiFi++: a unified framework for bandwidth extension and speech enhancement (2022). https:\/\/doi.org\/10.48550\/ARXIV.2203.13086","DOI":"10.48550\/ARXIV.2203.13086"},{"key":"26_CR2","unstructured":"Arik, S.O., Chen, J., Peng, K., Ping, W., Zhou, Y.: Neural voice cloning with a few samples. In: Advances in Neural Information Processing Systems, pp. 10019\u201310029 (2018)"},{"key":"26_CR3","doi-asserted-by":"publisher","unstructured":"Baevski, A., Zhou, H., Mohamed, A., Auli, M.: wav2vec 2.0: a framework for self-supervised learning of speech representations (2020). https:\/\/doi.org\/10.48550\/ARXIV.2006.11477","DOI":"10.48550\/ARXIV.2006.11477"},{"key":"26_CR4","unstructured":"Chen, Y., et al.: Sample efficient adaptive text-to-speech. In: Proceedings of the International Conference on Learning Representations (2019)"},{"key":"26_CR5","doi-asserted-by":"publisher","unstructured":"Cooper, E., et al.: Zero-shot multi-speaker text-to-speech with state-of-the-art neural speaker embeddings. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6184\u20136188 (2020). https:\/\/doi.org\/10.1109\/ICASSP40776.2020.9054535","DOI":"10.1109\/ICASSP40776.2020.9054535"},{"key":"26_CR6","unstructured":"Jia, Y., et al.: Transfer learning from speaker verification to multispeaker text-to-speech synthesis. In: Proceedings of the Neural Information Processing Systems Conference, no. 32 (2018)"},{"key":"26_CR7","unstructured":"Le Huche, F., Allali, A.: La voix. Collection Phoniatrie, Elsevier Masson, 2e \u00e9dition edn. 
(2010)"},{"key":"26_CR8","doi-asserted-by":"publisher","unstructured":"Lo, C.C., et al.: MOSNet: deep learning-based objective assessment for voice conversion. In: Interspeech (2019). https:\/\/doi.org\/10.21437\/Interspeech.2019-2003","DOI":"10.21437\/Interspeech.2019-2003"},{"key":"26_CR9","unstructured":"Mozilla: CommonVoice, commonvoice.mozilla.org, consulted in December 2020"},{"key":"26_CR10","doi-asserted-by":"publisher","unstructured":"Prenger, R., Valle, R., Catanzaro, B.: WaveGlow: a flow-based generative network for speech synthesis. In: 2019 IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP 2019, pp. 3617\u20133621 (2019). https:\/\/doi.org\/10.1109\/ICASSP.2019.8683143","DOI":"10.1109\/ICASSP.2019.8683143"},{"key":"26_CR11","doi-asserted-by":"crossref","unstructured":"Shen, J., et al.: Natural TTS synthesis by conditioning WaveNet on Mel spectrogram predictions. In: Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (2018)","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"26_CR12","unstructured":"Sini, A.: Characterisation and generation of expressivity in function of speaking styles for audiobook synthesis. Theses, Universit\u00e9 Rennes 1 (2020)"},{"key":"26_CR13","unstructured":"Sini, A., Lolive, D., Vidal, G., Tahon, M., Delais-Roussarie, E.: SynPaFlex-corpus: an expressive French audiobooks corpus dedicated to expressive speech synthesis. In: Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC), Miyazaki, Japan (2018)"},{"key":"26_CR14","doi-asserted-by":"publisher","unstructured":"Sini, A., Maguer, S.L., Lolive, D., Delais-Roussarie, E.: Introducing prosodic speaker identity for a better expressive speech synthesis control. In: 10th International Conference on Speech Prosody 2020, Tokyo, Japan, pp. 935\u2013939. ISCA (2020). https:\/\/doi.org\/10.21437\/speechprosody.2020-191. https:\/\/hal.science\/hal-03000148","DOI":"10.21437\/speechprosody.2020-191"},{"key":"26_CR15","doi-asserted-by":"crossref","unstructured":"Snyder, D., Garcia-Romero, D., Povey, D., Khudanpur, S.: Deep neural network embeddings for text-independent speaker verification. In: Proceedings of Interspeech (2017)","DOI":"10.21437\/Interspeech.2017-620"},{"issue":"1","key":"26_CR16","doi-asserted-by":"publisher","first-page":"31","DOI":"10.3322\/caac.21386","volume":"67","author":"CE Steuer","year":"2017","unstructured":"Steuer, C.E., El-Deiry, M., Parks, J.R., Higgins, K.A., Saba, N.F.: An update on larynx cancer. CA Cancer J. Clin. 67(1), 31\u201350 (2017)","journal-title":"CA Cancer J. Clin."},{"key":"26_CR17","doi-asserted-by":"crossref","unstructured":"Wan, L., Wang, Q., Papir, A., Moreno, I.L.: Generalized end-to-end loss for speaker verification. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4879\u20134883 (2018)","DOI":"10.1109\/ICASSP.2018.8462665"},{"key":"26_CR18","unstructured":"Yamagishi, J., Honnet, P.E., Garner, P., Lazaridis, A.: The SIWIS French speech synthesis database. 
Technical report, Idiap Research Institute (2017)"}],"container-title":["Lecture Notes in Computer Science","Text, Speech, and Dialogue"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-40498-6_26","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,22]],"date-time":"2023-08-22T19:06:06Z","timestamp":1692731166000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-40498-6_26"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031404979","9783031404986"],"references-count":18,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-40498-6_26","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"23 August 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"TSD","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Text, Speech, and Dialogue","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Pilsen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Czech Republic","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 September 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"tsd2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.kiv.zcu.cz\/tsd2023\/index.php","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMS & back-office system","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"64","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"31","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer 
Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"48% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.56","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
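The record above is a Crossref work object for the chapter "Voice Cloning for Voice Disorders: Impact of Phonetic Content" (DOI 10.1007/978-3-031-40498-6_26), wrapped in the standard {"status", "message-type", "message"} envelope that the Crossref REST API returns. The following is a minimal sketch of how such a record can be retrieved and summarised; it assumes network access, the third-party requests package, and a placeholder contact address for Crossref's "polite pool" header. Every field it reads (title, author, container-title, page, issued, DOI, references-count, assertion) appears in the record above.

```python
# Minimal sketch: fetch this Crossref work record and print a short summary.
# Assumptions: the "requests" package is installed, and the mailto address in
# the User-Agent header is a placeholder to be replaced with a real contact.
import requests

DOI = "10.1007/978-3-031-40498-6_26"
HEADERS = {"User-Agent": "metadata-example/0.1 (mailto:name@example.org)"}  # placeholder contact

resp = requests.get(f"https://api.crossref.org/works/{DOI}", headers=HEADERS, timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object holds the chapter metadata

# Title and authors ("given"/"family" fields, in the deposited "sequence" order).
title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))

# Container (book title of the LNCS volume), publication year, and pagination.
container = work.get("container-title", ["?"])[-1]
year = work["issued"]["date-parts"][0][0]
pages = work.get("page", "n/a")

print(f"{authors}: {title}. In: {container}, pp. {pages} ({year}).")
print(f"DOI: https://doi.org/{work['DOI']}")
print(f"References deposited: {work.get('references-count', 0)}")

# Conference details are deposited as name/value pairs in the "assertion" list.
conf = {a["name"]: a["value"] for a in work.get("assertion", [])}
print(f"Presented at {conf.get('conference_acronym')} {conf.get('conference_year')}, "
      f"{conf.get('conference_city')} ({conf.get('conference_country')}).")
```

Field names and nesting follow the record as shown; a live response may differ slightly over time (for example, is-referenced-by-count is updated as citations accrue), so optional fields are read with .get() rather than direct indexing.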