{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,20]],"date-time":"2024-09-20T16:58:49Z","timestamp":1726851529742},"reference-count":10,"publisher":"The Open Journal","issue":"81","license":[{"start":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T00:00:00Z","timestamp":1674777600000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by\/4.0\/"},{"start":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T00:00:00Z","timestamp":1674777600000},"content-version":"am","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by\/4.0\/"},{"start":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T00:00:00Z","timestamp":1674777600000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["JOSS"],"published-print":{"date-parts":[[2023,1,27]]},"DOI":"10.21105\/joss.04739","type":"journal-article","created":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T10:34:33Z","timestamp":1674815673000},"page":"4739","source":"Crossref","is-referenced-by-count":3,"title":["Spafe: Simplified python audio features\nextraction"],"prefix":"10.21105","volume":"8","author":[{"ORCID":"http:\/\/orcid.org\/0000-0002-9008-7562","authenticated-orcid":false,"given":"Ayoub","family":"Malek","sequence":"first","affiliation":[]}],"member":"8722","reference":[{"issue":"7825","key":"numpy:2020","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-020-2649-2","article-title":"Array programming with NumPy","volume":"585","author":"Harris","year":"2020","unstructured":"Harris, C. R., Millman, K. J., Walt,\nS. J. van der, Gommers, R., Virtanen, P., Cournapeau, D., Wieser, E.,\nTaylor, J., Berg, S., Smith, N. J., Kern, R., Picus, M., Hoyer, S.,\nKerkwijk, M. H. van, Brett, M., Haldane, A., R\u00edo, J. F. del, Wiebe, M.,\nPeterson, P., \u2026 Oliphant, T. E. (2020). Array programming with NumPy.\nNature, 585(7825), 357\u2013362.\nhttps:\/\/doi.org\/10.1038\/s41586-020-2649-2","journal-title":"Nature"},{"key":"scipy:2019","doi-asserted-by":"publisher","DOI":"10.1038\/s41592-019-0686-2","article-title":"SciPy 1.0: Fundamental algorithms for\nscientific computing in Python","author":"Pauli Virtanen","year":"2019","unstructured":"Pauli Virtanen, T. E. O., Ralf\nGommers, & V\u00e1zquez\u2010Baeza, Y. (2019). SciPy 1.0: Fundamental\nalgorithms for scientific computing in Python. Nature Methods, 1\u201312.\nhttps:\/\/doi.org\/10.1038\/s41592-019-0686-2","journal-title":"Nature Methods"},{"issue":"27","key":"speechpy:2018","doi-asserted-by":"publisher","DOI":"10.21105\/joss.00749","article-title":"SpeechPy - a library for speech processing\nand recognition","volume":"3","author":"Torfi","year":"2018","unstructured":"Torfi, A. (2018). SpeechPy - a\nlibrary for speech processing and recognition. Journal of Open Source\nSoftware, 3(27), 749.\nhttps:\/\/doi.org\/10.21105\/joss.00749","journal-title":"Journal of Open Source\nSoftware"},{"key":"librosa:2015","doi-asserted-by":"publisher","DOI":"10.25080\/Majora-7b98e3ed-003","article-title":"Librosa: Audio and Music Signal Analysis in\nPython","author":"McFee","year":"2015","unstructured":"McFee, Brian, Raffel, Colin, Liang,\nDawen, Ellis, Daniel P.W., McVicar, Matt, Battenberg, Eric, & Nieto,\nOriol. (2015). Librosa: Audio and Music Signal Analysis in Python. 
In\nKathryn Huff & James Bergstra (Eds.), Proceedings of the 14th Python\nin Science Conference (pp. 18\u201324).\nhttps:\/\/doi.org\/10.25080\/Majora-7b98e3ed-003","journal-title":"Proceedings of the 14th Python in Science\nConference"},{"key":"python_speech_features:2020","doi-asserted-by":"publisher","DOI":"10.5281\/ZENODO.3607820","volume-title":"Jameslyons\/python_speech_features: Release\nv0.6.1","author":"Lyons","year":"2020","unstructured":"Lyons, J., Wang, D. Y.-B., Gianluca,\nShteingart, H., Mavrinac, E., Gaurkar, Y., Watcharawisetkul, W., Birch,\nS., Zhihe, L., H\u00f6lzl, J., Lesinskis, J., Alm\u00e9r, H., Lord, C., &\nStark, A. (2020). Jameslyons\/python_speech_features: Release v0.6.1.\nZenodo. https:\/\/doi.org\/10.5281\/ZENODO.3607820"},{"key":"bob:2017","article-title":"Continuously reproducing toolchains in\npattern recognition and machine learning experiments","author":"A. Anjos","year":"2017","unstructured":"A. Anjos, T. de F. P., M. G\u00fcnther,\n& Marcel, S. (2017, August). Continuously reproducing toolchains in\npattern recognition and machine learning experiments. International\nConference on Machine Learning (ICML).\nhttp:\/\/publications.idiap.ch\/downloads\/papers\/2017\/Anjos_ICML2017-2_2017.pdf","journal-title":"International conference on machine learning\n(ICML)"},{"key":"penedo:2019","doi-asserted-by":"publisher","DOI":"10.1186\/s13634-019-0632-6","article-title":"Designing digital filter banks using\nwavelets","volume":"2019","author":"Penedo","year":"2019","unstructured":"Penedo, S. R. M., Netto, M. L., &\nJusto, J. F. (2019). Designing digital filter banks using wavelets. In\nEURASIP Journal on Advances in Signal Processing (No. 1; Vol. 2019).\nSpringer Science; Business Media LLC.\nhttps:\/\/doi.org\/10.1186\/s13634-019-0632-6","journal-title":"EURASIP Journal on Advances in Signal\nProcessing"},{"key":"sarangi:2020","doi-asserted-by":"publisher","DOI":"10.1016\/j.dsp.2020.102795","article-title":"Optimization of data-driven filterbank for\nautomatic speaker verification","volume":"104","author":"Sarangi","year":"2020","unstructured":"Sarangi, S., Sahidullah, M., &\nSaha, G. (2020). Optimization of data-driven filterbank for automatic\nspeaker verification. In Digital Signal Processing (Vol. 104, p.\n102795). Elsevier BV.\nhttps:\/\/doi.org\/10.1016\/j.dsp.2020.102795","journal-title":"Digital Signal Processing"},{"key":"rastislav:2013","article-title":"Dominant frequency extraction","volume":"abs\/1306.0103","author":"Telgarsky","year":"2013","unstructured":"Telgarsky, R. (2013). Dominant\nfrequency extraction. CoRR, abs\/1306.0103.\nhttp:\/\/arxiv.org\/abs\/1306.0103","journal-title":"CoRR"},{"key":"cheveigne:2002","doi-asserted-by":"publisher","DOI":"10.1121\/1.1458024","article-title":"YIN, a fundamental frequency estimator for\nspeech and music","volume":"111","author":"Cheveign\u00e9","year":"2002","unstructured":"Cheveign\u00e9, A. de, & Kawahara, H.\n(2002). YIN, a fundamental frequency estimator for speech and music. In\nThe Journal of the Acoustical Society of America (No. 4; Vol. 111, pp.\n1917\u20131930). 
Acoustical Society of America (ASA).\nhttps:\/\/doi.org\/10.1121\/1.1458024","journal-title":"The Journal of the Acoustical Society of\nAmerica"}],"container-title":["Journal of Open Source Software"],"original-title":[],"link":[{"URL":"https:\/\/joss.theoj.org\/papers\/10.21105\/joss.04739.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T10:34:38Z","timestamp":1674815678000},"score":1,"resource":{"primary":{"URL":"https:\/\/joss.theoj.org\/papers\/10.21105\/joss.04739"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,1,27]]},"references-count":10,"journal-issue":{"issue":"81","published-online":{"date-parts":[[2023,1]]}},"alternative-id":["10.21105\/joss.04739"],"URL":"https:\/\/doi.org\/10.21105\/joss.04739","relation":{"has-review":[{"id-type":"uri","id":"https:\/\/github.com\/openjournals\/joss-reviews\/issues\/4739","asserted-by":"subject"}],"references":[{"id-type":"doi","id":"10.5281\/zenodo.7533946","asserted-by":"subject"}]},"ISSN":["2475-9066"],"issn-type":[{"value":"2475-9066","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,1,27]]}}}
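
A work record with this envelope (status / message-type / message) can be retrieved from the public Crossref REST API at https://api.crossref.org/works/<DOI>. The Python sketch below is a minimal, hypothetical client (it assumes network access); the key names it reads mirror the record above.

# Minimal sketch (assumption: network access; uses the public Crossref REST API)
import json
from urllib.request import urlopen

DOI = "10.21105/joss.04739"

with urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    record = json.load(resp)          # same envelope: status / message-type / message

msg = record["message"]
print(msg["title"][0])                # Spafe: Simplified python audio features extraction
print(msg["container-title"][0])      # Journal of Open Source Software
print(msg["DOI"], msg["published"]["date-parts"][0])

# Each "reference" entry carries the citation key, an optional DOI, and the
# unstructured citation string deposited by the publisher.
for ref in msg.get("reference", []):
    print(ref["key"], ref.get("DOI", "(no DOI)"))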