{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T18:40:47Z","timestamp":1776883247050,"version":"3.51.2"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10446933","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"10316-10320","source":"Crossref","is-referenced-by-count":12,"title":["Paralinguistics-Enhanced Large Language Modeling of Spoken Dialogue"],"prefix":"10.1109","author":[{"given":"Guan-Ting","family":"Lin","sequence":"first","affiliation":[{"name":"Amazon Alexa AI,USA"}]},{"given":"Prashanth Gurunath","family":"Shivakumar","sequence":"additional","affiliation":[{"name":"Amazon Alexa AI,USA"}]},{"given":"Ankur","family":"Gandhe","sequence":"additional","affiliation":[{"name":"Amazon Alexa AI,USA"}]},{"given":"Chao-Han Huck","family":"Yang","sequence":"additional","affiliation":[{"name":"Amazon Alexa AI,USA"}]},{"given":"Yile","family":"Gu","sequence":"additional","affiliation":[{"name":"Amazon Alexa AI,USA"}]},{"given":"Shalini","family":"Ghosh","sequence":"additional","affiliation":[{"name":"Amazon Alexa AI,USA"}]},{"given":"Andreas","family":"Stolcke","sequence":"additional","affiliation":[{"name":"Amazon Alexa AI,USA"}]},{"given":"Hung-Yi","family":"Lee","sequence":"additional","affiliation":[{"name":"National Taiwan University,Taiwan"}]},{"given":"Ivan","family":"Bulyko","sequence":"additional","affiliation":[{"name":"Amazon Alexa AI,USA"}]}],"member":"263","reference":[{"key":"ref1","article-title":"GPT-4 technical report","year":"2023"},{"key":"ref2","article-title":"Is ChatGPT equipped with emotional dialogue capabilities?","author":"Zhao","year":"2023"},{"key":"ref3","first-page":"4619","article-title":"Towards multi-modal sarcasm detection (an obviously perfect paper)","volume-title":"Proc. ACL","author":"Castro"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-68612-7_54"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3514197.3549692"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1359"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2020.3015491"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.255"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.379"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1162\/089120100561737"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2011.07.009"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01298-4_29"},{"key":"ref13","article-title":"User response and sentiment prediction for automatic dialogue evaluation","author":"Ghazarian","year":"2021"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.331"},{"key":"ref15","first-page":"6165","article-title":"Polise: Reinforcing politeness using user sentiment for customer care response generation","volume-title":"Proc. Intl. Conf. on Computational Linguistics","author":"Firdaus"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683679"},{"key":"ref17","first-page":"50","article-title":"Speech emotion recognition based on fusion method","volume":"5","author":"Motamed","year":"2017","journal-title":"Journal of Information Systems and Telecommunication"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/2993148.2997630"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2016.09.117"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10023234"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794468"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1820"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1236"},{"key":"ref24","first-page":"1336","article-title":"On generative spoken language modeling from raw audio","volume":"9","author":"Lakhotia","year":"2021","journal-title":"TACL"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.593"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00545"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3288409"},{"key":"ref28","article-title":"AudioPaLM: A large language model that can speak and listen","author":"Rubenstein","year":"2023"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.1055"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acldemos.30"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052937"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1723"},{"key":"ref33","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume":"33","author":"Baevski","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-236"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10023428"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1992.225858"},{"key":"ref37","first-page":"6549","article-title":"A large scale speech sentiment corpus","volume-title":"Proc. LREC","author":"Chen"},{"key":"ref38","article-title":"A state-of-the-art large-scale pretrained response generation model"},{"key":"ref39","article-title":"Wav2vec2-large-robust"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10446933.pdf?arnumber=10446933","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T05:27:44Z","timestamp":1722576464000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10446933\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10446933","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}