{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T19:17:34Z","timestamp":1757618254434,"version":"3.44.0"},"reference-count":39,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2025,6,12]],"date-time":"2025-06-12T00:00:00Z","timestamp":1749686400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,6,12]],"date-time":"2025-06-12T00:00:00Z","timestamp":1749686400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"The Tianshan Talents-Technology Innovation Leading Talents Project of Xinjiang Uygur Autonomous Region, China","award":["20243123706","20243123706","20243123706","20243123706","20243123706"],"award-info":[{"award-number":["20243123706","20243123706","20243123706","20243123706","20243123706"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Lang Resources & Evaluation"],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1007\/s10579-025-09845-0","type":"journal-article","created":{"date-parts":[[2025,6,12]],"date-time":"2025-06-12T00:06:18Z","timestamp":1749686778000},"page":"3029-3050","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["A Chinese natural speech complex emotion dataset based on emotion vector annotation method"],"prefix":"10.1007","volume":"59","author":[{"given":"Xiaolong","family":"Wu","sequence":"first","affiliation":[]},{"given":"Chaobo","family":"Song","sequence":"additional","affiliation":[]},{"given":"Shanshan","family":"Xiang","sequence":"additional","affiliation":[]},{"given":"Ronghe","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Chang","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Hankiz","family":"Yilahun","sequence":"additional","affiliation":[]},{"given":"Mingxing","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Askar","family":"Hamdulla","sequence":"additional","affiliation":[]},{"given":"Thomas Fang","family":"Zheng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,6,12]]},"reference":[{"key":"9845_CR1","doi-asserted-by":"publisher","first-page":"56","DOI":"10.1016\/j.specom.2019.12.001","volume":"116","author":"MB Ak\u00e7ay","year":"2020","unstructured":"Ak\u00e7ay, M. B., & O\u011fuz, K. (2020). Speech emotion recognition: Emotional models, databases, features, preprocessing methods, supporting modalities, and classifiers. Speech Communication, 116, 56\u201376.","journal-title":"Speech Communication"},{"key":"9845_CR2","doi-asserted-by":"crossref","unstructured":"Ando, A., Kobashikawa, S., Kamiyama, H., Masumura, R., Ijima, Y., & Aono, Y. (2018). Soft-target training with ambiguous emotional utterances for dnn-based speech emotion classification. In 2018 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp. 4964\u20134968). IEEE.","DOI":"10.1109\/ICASSP.2018.8461299"},{"key":"9845_CR3","first-page":"12449","volume":"33","author":"A Baevski","year":"2020","unstructured":"Baevski, A., Zhou, Y., Mohamed, A., & Auli, M. (2020). wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33, 12449\u201312460.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"9845_CR4","doi-asserted-by":"crossref","unstructured":"Bao, W., Li, Y., Gu, M., Yang, M., Li, H., Chao, L., & Tao, J. (2014). Building a chinese natural emotional audio-visual database. In 2014 12th international conference on signal processing (ICSP) (pp. 583\u2013587). IEEE.","DOI":"10.1109\/ICOSP.2014.7015071"},{"key":"9845_CR5","unstructured":"Batliner, A., Fischer, K., Huber, R., Spilker, J., & N\u00f6th, E. (2000). Desperately seeking emotions or: Actors, wizards, and human beings. In ISCA tutorial and research workshop (ITRW) on speech and emotion."},{"key":"9845_CR6","doi-asserted-by":"publisher","first-page":"1606","DOI":"10.3389\/fpsyg.2019.01606","volume":"10","author":"R Berrios","year":"2019","unstructured":"Berrios, R. (2019). What is complex\/emotional about emotional complexity? Frontiers in Psychology, 10, 1606.","journal-title":"Frontiers in Psychology"},{"key":"9845_CR7","first-page":"1517","volume":"5","author":"F Burkhardt","year":"2005","unstructured":"Burkhardt, F., Paeschke, A., Rolfes, M., Sendlmeier, W. F., Weiss, B., et al. (2005). A database of German emotional speech. Interspeech, 5, 1517\u20131520.","journal-title":"Interspeech"},{"key":"9845_CR8","doi-asserted-by":"publisher","first-page":"335","DOI":"10.1007\/s10579-008-9076-6","volume":"42","author":"C Busso","year":"2008","unstructured":"Busso, C., Bulut, M., Lee, C.-C., Kazemzadeh, A., Mower, E., Kim, S., Chang, J. N., Lee, S., & Narayanan, S. S. (2008). Iemocap: Interactive emotional dyadic motion capture database. Language Resources and Evaluation, 42, 335\u2013359.","journal-title":"Language Resources and Evaluation"},{"issue":"1","key":"9845_CR9","doi-asserted-by":"publisher","first-page":"67","DOI":"10.1109\/TAFFC.2016.2515617","volume":"8","author":"C Busso","year":"2016","unstructured":"Busso, C., Parthasarathy, S., Burmania, A., AbdelWahab, M., Sadoughi, N., & Provost, E. M. (2016). Msp-improv: An acted corpus of dyadic interactions to study emotion perception. IEEE Transactions on Affective Computing, 8(1), 67\u201380.","journal-title":"IEEE Transactions on Affective Computing"},{"key":"9845_CR10","doi-asserted-by":"crossref","unstructured":"Chou, H.-C., Lin, W.-C., Chang, L.-C., Li, C.-C., Ma, H.-P., & Lee, C.-C. (2017). Nnime: The nthu-ntua Chinese interactive multimodal emotion corpus. In 2017 seventh international conference on affective computing and intelligent interaction (ACII) (pp. 292\u2013298). IEEE.","DOI":"10.1109\/ACII.2017.8273615"},{"issue":"5","key":"9845_CR11","doi-asserted-by":"publisher","first-page":"149","DOI":"10.12700\/APH.19.5.2022.5.8","volume":"19","author":"M Develasco","year":"2022","unstructured":"Develasco, M., Justo, R., Zorrilla, A. L., & Torres, M. I. (2022). Automatic analysis of emotions from the voices\/speech in Spanish tv debates. Acta Polytechnica Hungarica, 19(5), 149\u2013172.","journal-title":"Acta Polytechnica Hungarica"},{"issue":"4","key":"9845_CR12","doi-asserted-by":"publisher","first-page":"407","DOI":"10.1016\/j.neunet.2005.03.007","volume":"18","author":"L Devillers","year":"2005","unstructured":"Devillers, L., Vidrascu, L., & Lamel, L. (2005). Challenges in real-life emotion annotation and machine learning based detection. Neural Networks, 18(4), 407\u2013422.","journal-title":"Neural Networks"},{"key":"9845_CR13","volume-title":"The nature of emotion: Fundamental questions","author":"PE Ekman","year":"1994","unstructured":"Ekman, P. E., & Davidson, R. J. (1994). The nature of emotion: Fundamental questions. Oxford University Press."},{"issue":"1","key":"9845_CR14","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1109\/TCSS.2019.2922593","volume":"7","author":"BA Erol","year":"2019","unstructured":"Erol, B. A., Majumdar, A., Benavidez, P., Rad, P., Choo, K.-K.R., & Jamshidi, M. (2019). Toward artificial emotional intelligence for cooperative social human-machine interaction. IEEE Transactions on Computational Social Systems, 7(1), 234\u2013246.","journal-title":"IEEE Transactions on Computational Social Systems"},{"key":"9845_CR15","doi-asserted-by":"crossref","unstructured":"Fayek, H. M., Lech, M., & Cavedon, L. (2016). Modeling subjectiveness in emotion recognition with deep neural networks: Ensembles vs soft labels. In 2016 international joint conference on neural networks (IJCNN) (pp. 566\u2013570). IEEE.","DOI":"10.1109\/IJCNN.2016.7727250"},{"issue":"1","key":"9845_CR16","doi-asserted-by":"publisher","first-page":"68","DOI":"10.4018\/jse.2010101605","volume":"1","author":"H Gunes","year":"2010","unstructured":"Gunes, H., & Pantic, M. (2010). Automatic, dimensional and continuous emotion recognition. International Journal of Synthetic Emotions (IJSE), 1(1), 68\u201399.","journal-title":"International Journal of Synthetic Emotions (IJSE)"},{"key":"9845_CR17","doi-asserted-by":"publisher","first-page":"157","DOI":"10.1007\/s10433-011-0191-7","volume":"8","author":"EL Hay","year":"2011","unstructured":"Hay, E. L., & Diehl, M. (2011). Emotion complexity and emotion regulation across adulthood. European Journal of Ageing, 8, 157\u2013168.","journal-title":"European Journal of Ageing"},{"key":"9845_CR18","doi-asserted-by":"crossref","unstructured":"Li, Y., Tao, J., Schuller, B., Shan, S., Jiang, D., & Jia, J. (2018). Mec 2017: Multimodal emotion recognition challenge. In 2018 first Asian conference on affective computing and intelligent interaction (ACII Asia) (pp. 1\u20135). IEEE.","DOI":"10.1109\/ACIIAsia.2018.8470342"},{"key":"9845_CR19","doi-asserted-by":"publisher","first-page":"913","DOI":"10.1007\/s12652-016-0406-z","volume":"8","author":"Y Li","year":"2017","unstructured":"Li, Y., Tao, J., Chao, L., Bao, W., & Liu, Y. (2017). Cheavd: A Chinese natural emotional audio-visual database. Journal of Ambient Intelligence and Humanized Computing, 8, 913\u2013924.","journal-title":"Journal of Ambient Intelligence and Humanized Computing"},{"key":"9845_CR20","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2023.103027","volume":"157","author":"W-C Lin","year":"2024","unstructured":"Lin, W.-C., & Carlos, B. (2024). Deep temporal clustering features for speech emotion recognition. Speech Communication, 157, Article 103027.","journal-title":"Speech Communication"},{"key":"9845_CR21","first-page":"513","volume-title":"Handbook of emotions","author":"KA Lindquist","year":"2008","unstructured":"Lindquist, K. A., & Barrett, L. F. (2008). Emotional complexity. In M. Lewis, J. M. Haviland-Jones, & L. F. Barrett (Eds.), Handbook of emotions (3rd ed., pp. 513\u2013530). The Guilford Press.","edition":"3"},{"key":"9845_CR22","doi-asserted-by":"publisher","first-page":"1042","DOI":"10.3758\/s13428-012-0203-3","volume":"44","author":"P Liu","year":"2012","unstructured":"Liu, P., & Pell, M. D. (2012). Recognizing vocal emotions in mandarin Chinese: A validated database of Chinese vocal emotional stimuli. Behavior Research Methods, 44, 1042\u20131051.","journal-title":"Behavior Research Methods"},{"key":"9845_CR23","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2023.103010","volume":"156","author":"M Liu","year":"2024","unstructured":"Liu, M., Alex, N. J., Vijayarajan, R., Ma, K., Zhuang, Z., & Zhuang, S. (2024). Multiscale-multichannel feature extraction and classification through one-dimensional convolutional neural network for speech emotion recognition. Speech Communication, 156, Article 103010.","journal-title":"Speech Communication"},{"issue":"4","key":"9845_CR24","doi-asserted-by":"publisher","first-page":"471","DOI":"10.1109\/TAFFC.2017.2736999","volume":"10","author":"R Lotfian","year":"2017","unstructured":"Lotfian, R., & Busso, C. (2017). Building naturalistic emotionally balanced speech corpus by retrieving emotional speech from existing podcast recordings. IEEE Transactions on Affective Computing, 10(4), 471\u2013483.","journal-title":"IEEE Transactions on Affective Computing"},{"key":"9845_CR25","doi-asserted-by":"crossref","unstructured":"Mao, S., Ching, P.-C., & Lee, T. (2020). Emotion profile refinery for speech emotion classification. Preprint retrieved from http:\/\/arxiv.org\/abs\/2008.05259","DOI":"10.21437\/Interspeech.2020-1771"},{"key":"9845_CR26","doi-asserted-by":"crossref","unstructured":"Martin, O., Kotsia, I., Macq, B., & Pitas, I. (2006). The enterface\u201905 audio-visual emotion database. In 22nd international conference on data engineering workshops (ICDEW\u201906) (pp. 8\u201318). IEEE.","DOI":"10.1109\/ICDEW.2006.145"},{"key":"9845_CR27","doi-asserted-by":"publisher","first-page":"261","DOI":"10.1007\/BF02686918","volume":"14","author":"A Mehrabian","year":"1996","unstructured":"Mehrabian, A. (1996). Pleasure-arousal-dominance: A general framework for describing and measuring individual differences in temperament. Current Psychology, 14, 261\u2013292.","journal-title":"Current Psychology"},{"key":"9845_CR28","doi-asserted-by":"crossref","unstructured":"Mohammad, S., Bravo-Marquez, F., Salameh, M., & Kiritchenko, S. (2018). Semeval-2018 task 1: Affect in tweets. In Proceedings of the 12th international workshop on semantic evaluation (pp. 1\u201317).","DOI":"10.18653\/v1\/S18-1001"},{"key":"9845_CR29","unstructured":"Plutchik, R. (1980). Emotion. A psychoevolutionary synthesis."},{"key":"9845_CR30","first-page":"1","volume":"1","author":"R Plutchik","year":"1980","unstructured":"Plutchik, R. (1980). A general psychoevolutionary theory of emotion. Emotion Theory, Research, and Experience, 1, 1.","journal-title":"Emotion Theory, Research, and Experience"},{"key":"9845_CR31","doi-asserted-by":"crossref","unstructured":"Ringeval, F., Sonderegger, A., Sauer, J., & Lalanne, D. (2013). Introducing the recola multimodal corpus of remote collaborative and affective interactions. In: 2013 10th IEEE international conference and workshops on automatic face and gesture recognition (FG) (pp. 1\u20138). IEEE.","DOI":"10.1109\/FG.2013.6553805"},{"issue":"9\u201310","key":"9845_CR32","doi-asserted-by":"publisher","first-page":"1062","DOI":"10.1016\/j.specom.2011.01.011","volume":"53","author":"B Schuller","year":"2011","unstructured":"Schuller, B., Batliner, A., Steidl, S., & Seppi, D. (2011). Recognising realistic emotions and affect in speech: State of the art and lessons learnt from the first challenge. Speech Communication, 53(9\u201310), 1062\u20131087.","journal-title":"Speech Communication"},{"issue":"1","key":"9845_CR33","doi-asserted-by":"publisher","first-page":"114","DOI":"10.1080\/02699930701319154","volume":"22","author":"GP Strauss","year":"2008","unstructured":"Strauss, G. P., & Allen, D. N. (2008). Emotional intensity and categorisation ratings for emotional and nonemotional words. Cognition and Emotion, 22(1), 114\u2013133.","journal-title":"Cognition and Emotion"},{"issue":"4","key":"9845_CR34","doi-asserted-by":"publisher","first-page":"664","DOI":"10.1177\/0022022118763749","volume":"49","author":"M Sun","year":"2018","unstructured":"Sun, M., & Lau, A. S. (2018). Exploring cultural differences in expressive suppression and emotion recognition. Journal of Cross-Cultural Psychology, 49(4), 664\u2013672.","journal-title":"Journal of Cross-Cultural Psychology"},{"key":"9845_CR35","doi-asserted-by":"crossref","unstructured":"Tu, Z., Liu, B., Zhao, W., & Cao, B. (2021). Establishment of chinese speech emotion database of broadcasting. In 2021 international conference on culture-oriented science & technology (ICCST) (pp. 603\u2013606). IEEE.","DOI":"10.1109\/ICCST53801.2021.00131"},{"key":"9845_CR36","doi-asserted-by":"publisher","DOI":"10.1515\/9780804764360","volume-title":"On the origins of human emotions: A sociological inquiry into the evolution of human affect","author":"J Turner","year":"2000","unstructured":"Turner, J. (2000). On the origins of human emotions: A sociological inquiry into the evolution of human affect. Stanford University Press."},{"key":"9845_CR37","unstructured":"Wang, Y., Boumadane, A., & Heba, A. (2021). A fine-tuned wav2vec 2.0\/hubert benchmark for speech emotion recognition, speaker verification and spoken language understanding. Preprint retrieved from http:\/\/arxiv.org\/abs\/2111.02735"},{"issue":"4","key":"9845_CR38","doi-asserted-by":"publisher","first-page":"1632","DOI":"10.1109\/TKDE.2019.2947040","volume":"33","author":"N Xu","year":"2019","unstructured":"Xu, N., Liu, Y.-P., & Geng, X. (2019). Label enhancement for label distribution learning. IEEE Transactions on Knowledge and Data Engineering, 33(4), 1632\u20131643.","journal-title":"IEEE Transactions on Knowledge and Data Engineering"},{"key":"9845_CR39","doi-asserted-by":"crossref","unstructured":"Zhao, S., Tuan, L. A., Fu, J., Wen, J., & Luo, W. (2024). Exploring clean label backdoor attacks and defense in language models. In IEEE\/ACM transactions on audio, speech, and language processing.","DOI":"10.1109\/TASLP.2024.3407571"}],"container-title":["Language Resources and Evaluation"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10579-025-09845-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10579-025-09845-0\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10579-025-09845-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T19:15:55Z","timestamp":1757186155000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10579-025-09845-0"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,12]]},"references-count":39,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2025,9]]}},"alternative-id":["9845"],"URL":"https:\/\/doi.org\/10.1007\/s10579-025-09845-0","relation":{},"ISSN":["1574-020X","1574-0218"],"issn-type":[{"type":"print","value":"1574-020X"},{"type":"electronic","value":"1574-0218"}],"subject":[],"published":{"date-parts":[[2025,6,12]]},"assertion":[{"value":"13 May 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 June 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no conflict of interest to declare that are relevant to the content of this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"All authors have read and agreed to the published version of the manuscript.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}}]}}