{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T16:59:25Z","timestamp":1743008365201,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":20,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819996391"},{"type":"electronic","value":"9789819996407"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-99-9640-7_24","type":"book-chapter","created":{"date-parts":[[2024,1,4]],"date-time":"2024-01-04T15:02:38Z","timestamp":1704380558000},"page":"321-333","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Modular Joint Training for\u00a0Speech-Driven 3D Facial Animation"],"prefix":"10.1007","author":[{"given":"Xinran","family":"Cao","sequence":"first","affiliation":[]},{"given":"Jia","family":"Zhu","sequence":"additional","affiliation":[]},{"given":"Changfan","family":"Pan","sequence":"additional","affiliation":[]},{"given":"Changqin","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Jianyang","family":"Shi","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,5]]},"reference":[{"key":"24_CR1","doi-asserted-by":"crossref","unstructured":"Alghamdi, M.M., Wang, H., Bulpitt, A.J., Hogg, D.C.: Talking head from speech audio using a pre-trained image generator. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 5228\u20135236 (2022)","DOI":"10.1145\/3503161.3548101"},{"key":"24_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"35","DOI":"10.1007\/978-3-030-58545-7_3","volume-title":"Computer Vision \u2013 ECCV 2020","author":"L Chen","year":"2020","unstructured":"Chen, L., et al.: Talking-head generation with rhythmic head motion. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12354, pp. 35\u201351. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58545-7_3"},{"key":"24_CR3","doi-asserted-by":"crossref","unstructured":"Chen, L., Maddox, R.K., Duan, Z., Xu, C.: Hierarchical cross-modal talking face generation with dynamic pixel-wise loss. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7832\u20137841 (2019)","DOI":"10.1109\/CVPR.2019.00802"},{"key":"24_CR4","doi-asserted-by":"crossref","unstructured":"Cudeiro, D., Bolkart, T., Laidlaw, C., Ranjan, A., Black, M.J.: Capture, learning, and synthesis of 3d speaking styles. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
10101\u201310111 (2019)","DOI":"10.1109\/CVPR.2019.01034"},{"key":"24_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"408","DOI":"10.1007\/978-3-030-58577-8_25","volume-title":"Computer Vision \u2013 ECCV 2020","author":"D Das","year":"2020","unstructured":"Das, D., Biswas, S., Sinha, S., Bhowmick, B.: Speech-driven facial animation using cascaded GANs for learning of motion and texture. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 408\u2013424. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_25"},{"issue":"6","key":"24_CR6","doi-asserted-by":"publisher","first-page":"971","DOI":"10.1016\/j.cag.2006.08.017","volume":"30","author":"JM De Martino","year":"2006","unstructured":"De Martino, J.M., Magalh\u00e3es, L.P., Violaro, F.: Facial animation based on context-dependent visemes. Comput. Graph. 30(6), 971\u2013980 (2006)","journal-title":"Comput. Graph."},{"issue":"4","key":"24_CR7","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2897824.2925984","volume":"35","author":"P Edwards","year":"2016","unstructured":"Edwards, P., Landreth, C., Fiume, E., Singh, K.: Jali: an animator-centric viseme model for expressive lip synchronization. ACM Trans. Graph. (TOG) 35(4), 1\u201311 (2016)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"24_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"372","DOI":"10.1007\/978-3-319-93764-9_35","volume-title":"Latent Variable Analysis and Signal Separation","author":"SE Eskimez","year":"2018","unstructured":"Eskimez, S.E., Maddox, R.K., Xu, C., Duan, Z.: Generating talking face landmarks from speech. In: Deville, Y., Gannot, S., Mason, R., Plumbley, M.D., Ward, D. (eds.) LVA\/ICA 2018. LNCS, vol. 10891, pp. 372\u2013381. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-319-93764-9_35"},{"key":"24_CR9","doi-asserted-by":"crossref","unstructured":"Ezzat, T., Poggio, T.: Miketalk: A talking facial display based on morphing visemes. In: Proceedings Computer Animation 1998 (Cat. No. 98EX169), pp. 96\u2013102. IEEE (1998)","DOI":"10.1109\/CA.1998.681913"},{"key":"24_CR10","doi-asserted-by":"crossref","unstructured":"Fan, Y., Lin, Z., Saito, J., Wang, W., Komura, T.: FaceFormer: speech-driven 3d facial animation with transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18770\u201318780 (2022)","DOI":"10.1109\/CVPR52688.2022.01821"},{"issue":"3","key":"24_CR11","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2890493","volume":"35","author":"P Garrido","year":"2016","unstructured":"Garrido, P., et al.: Reconstruction of personalized 3d face rigs from monocular video. ACM Trans. Graph. (TOG) 35(3), 1\u201315 (2016)","journal-title":"ACM Trans. Graph. (TOG)"},{"issue":"4","key":"24_CR12","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073658","volume":"36","author":"T Karras","year":"2017","unstructured":"Karras, T., Aila, T., Laine, S., Herva, A., Lehtinen, J.: Audio-driven facial animation by joint end-to-end learning of pose and emotion. ACM Trans. Graph. (TOG) 36(4), 1\u201312 (2017)","journal-title":"ACM Trans. Graph. 
(TOG)"},{"issue":"6","key":"24_CR13","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2816795.2818130","volume":"34","author":"Y Liu","year":"2015","unstructured":"Liu, Y., Xu, F., Chai, J., Tong, X., Wang, L., Huo, Q.: Video-audio driven real-time facial animation. ACM Trans. Graph. (TOG) 34(6), 1\u201310 (2015)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"24_CR14","doi-asserted-by":"crossref","unstructured":"Mittal, G., Wang, B.: Animating face using disentangled audio representations. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 3290\u20133298 (2020)","DOI":"10.1109\/WACV45572.2020.9093527"},{"key":"24_CR15","doi-asserted-by":"publisher","DOI":"10.1201\/b10705","volume-title":"Computer Facial Animation","author":"FI Parke","year":"2008","unstructured":"Parke, F.I., Waters, K.: Computer Facial Animation. CRC Press, Boca Raton (2008)"},{"key":"24_CR16","doi-asserted-by":"crossref","unstructured":"Prajwal, K., Mukhopadhyay, R., Namboodiri, V.P., Jawahar, C.: A lip sync expert is all you need for speech to lip generation in the wild. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 484\u2013492 (2020)","DOI":"10.1145\/3394171.3413532"},{"key":"24_CR17","doi-asserted-by":"crossref","unstructured":"Richard, A., Zollh\u00f6fer, M., Wen, Y., De la Torre, F., Sheikh, Y.: Meshtalk: 3d face animation from speech using cross-modality disentanglement. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1173\u20131182 (2021)","DOI":"10.1109\/ICCV48922.2021.00121"},{"issue":"4","key":"24_CR18","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073640","volume":"36","author":"S Suwajanakorn","year":"2017","unstructured":"Suwajanakorn, S., Seitz, S.M., Kemelmacher-Shlizerman, I.: Synthesizing Obama: learning lip sync from audio. ACM Trans. Graph. (ToG) 36(4), 1\u201313 (2017)","journal-title":"ACM Trans. Graph. (ToG)"},{"key":"24_CR19","doi-asserted-by":"crossref","unstructured":"Wang, L., Han, W., Soong, F.K.: High quality lip-sync animation for 3d photo-realistic talking head. In: 2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4529\u20134532. IEEE (2012)","DOI":"10.1109\/ICASSP.2012.6288925"},{"key":"24_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"690","DOI":"10.1007\/978-3-030-01261-8_41","volume-title":"Computer Vision \u2013 ECCV 2018","author":"O Wiles","year":"2018","unstructured":"Wiles, O., Koepke, A.S., Zisserman, A.: X2Face: a network for controlling face generation using images, audio, and pose codes. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11217, pp. 690\u2013706. Springer, Cham (2018). 
https:\/\/doi.org\/10.1007\/978-3-030-01261-8_41"}],"container-title":["Communications in Computer and Information Science","Computer Supported Cooperative Work and Social Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-9640-7_24","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T12:37:29Z","timestamp":1730983049000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-9640-7_24"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9789819996391","9789819996407"],"references-count":20,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-9640-7_24","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"5 January 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ChineseCSCW","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"CCF Conference on Computer Supported Cooperative Work and Social Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Harbin","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 August 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20 August 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"chinesecscw2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conf.scholat.com\/ccscw\/2023","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes. 
Microsoft CMT.","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"221","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"54","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"24% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
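The record above is the standard envelope returned by Crossref's public REST API (https://api.crossref.org/works/{DOI}): a thin wrapper of "status" and "message-type" around the "message" object that carries the bibliographic fields. A minimal sketch of retrieving and unpacking the same record follows; it assumes Python with the `requests` package available, and the specific fields printed are illustrative choices, not part of the record itself.

# Minimal sketch, assuming Python 3 with `requests` installed and network
# access to api.crossref.org (Crossref's public works endpoint).
import requests

DOI = "10.1007/978-981-99-9640-7_24"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()

# Unwrap the {"status": ..., "message-type": ..., "message": ...} envelope.
work = resp.json()["message"]

title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
print(title)                     # Modular Joint Training for Speech-Driven 3D Facial Animation
print(authors)                   # Xinran Cao, Jia Zhu, Changfan Pan, ...
print(work["references-count"])  # 20 cited works in the "reference" array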