{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,4]],"date-time":"2026-03-04T18:38:04Z","timestamp":1772649484777,"version":"3.50.1"},"reference-count":69,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2024,10,9]],"date-time":"2024-10-09T00:00:00Z","timestamp":1728432000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,9]],"date-time":"2024-10-09T00:00:00Z","timestamp":1728432000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100012165","name":"Key Technologies Research and Development Program","doi-asserted-by":"publisher","award":["2018YFC0807306"],"award-info":[{"award-number":["2018YFC0807306"]}],"id":[{"id":"10.13039\/501100012165","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U1936212"],"award-info":[{"award-number":["U1936212"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62120106009"],"award-info":[{"award-number":["62120106009"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004826","name":"Natural Science Foundation of Beijing Municipality","doi-asserted-by":"publisher","award":["4222014"],"award-info":[{"award-number":["4222014"]}],"id":[{"id":"10.13039\/501100004826","id-type":"DOI","asserted-by":"publisher"}]},
{"DOI":"10.13039\/501100010244","name":"Science Foundation of China University of Petroleum, Beijing","doi-asserted-by":"publisher","award":["No.2462024YJRC023"],"award-info":[{"award-number":["No.2462024YJRC023"]}],"id":[{"id":"10.13039\/501100010244","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1007\/s11263-024-02249-7","type":"journal-article","created":{"date-parts":[[2024,10,9]],"date-time":"2024-10-09T17:02:40Z","timestamp":1728493360000},"page":"1532-1548","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Mining Generalized Multi-timescale Inconsistency for Detecting Deepfake Videos"],"prefix":"10.1007","volume":"133","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5204-2929","authenticated-orcid":false,"given":"Yang","family":"Yu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6776-6348","authenticated-orcid":false,"given":"Rongrong","family":"Ni","sequence":"additional","affiliation":[]},{"given":"Siyuan","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Yu","family":"Ni","sequence":"additional","affiliation":[]},{"given":"Yao","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Alex C.","family":"Kot","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,9]]},"reference":[{"key":"2249_CR1","doi-asserted-by":"crossref","unstructured":"Afchar, D., Nozick, V., Yamagishi, J., & Echizen, I. (2018). Mesonet: A compact facial video forgery detection network. In 2018 IEEE international workshop on information forensics and security (WIFS), (pp. 1\u20137). IEEE.","DOI":"10.1109\/WIFS.2018.8630761"},
{"key":"2249_CR2","doi-asserted-by":"crossref","unstructured":"Amerini, I., Galteri, L., Caldelli, R., & Del\u00a0Bimbo, A. (2019). Deepfake video detection through optical flow based cnn. In Proceedings of the IEEE international conference on computer vision workshops, (pp. 0\u20130).","DOI":"10.1109\/ICCVW.2019.00152"},{"key":"2249_CR3","doi-asserted-by":"crossref","unstructured":"Chai, L., Bau, D., Lim, S. N., & Isola, P. (2020). What makes fake images detectable? Understanding properties that generalize. In European conference on computer vision, (pp. 103\u2013120). Springer.","DOI":"10.1007\/978-3-030-58574-7_7"},{"key":"2249_CR4","doi-asserted-by":"crossref","unstructured":"Chen, S., Yao, T., Chen, Y., Ding, S., Li, J., & Ji, R. (2021). Local relation learning for face forgery detection. In Proceedings of the AAAI conference on artificial intelligence (vol.\u00a035, pp. 1081\u20131088).","DOI":"10.1609\/aaai.v35i2.16193"},{"key":"2249_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Z., & Yang, H. (2021). Attentive semantic exploring for manipulated face detection. In ICASSP 2021\u20132021 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp. 1985\u20131989). IEEE.","DOI":"10.1109\/ICASSP39728.2021.9414225"},{"issue":"5","key":"2249_CR6","doi-asserted-by":"crossref","first-page":"1024","DOI":"10.1109\/JSTSP.2020.2999185","volume":"14","author":"A Chintha","year":"2020","unstructured":"Chintha, A., Thai, B., Sohrawardi, S. J., Bhatt, K., Hickerson, A., Wright, M., & Ptucha, R. (2020). Recurrent convolutional structures for audio spoof and video deepfake detection. IEEE Journal of Selected Topics in Signal Processing, 14(5), 1024\u20131037.","journal-title":"IEEE Journal of Selected Topics in Signal Processing"},
{"key":"2249_CR7","doi-asserted-by":"crossref","unstructured":"Choi, D. H., Lee, H. J., Lee, S., Kim, J. U., & Ro, Y. M. (2020). Fake video detection with certainty-based attention network. In 2020 IEEE international conference on image processing (ICIP) (pp. 823\u2013827). IEEE.","DOI":"10.1109\/ICIP40778.2020.9190655"},{"key":"2249_CR8","doi-asserted-by":"crossref","unstructured":"Choi, J., Kim, T., Jeong, Y., Baek, S., & Choi, J. (2024). Exploiting style latent flows for generalizing deepfake video detection. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 1133\u20131143).","DOI":"10.1109\/CVPR52733.2024.00114"},{"key":"2249_CR9","doi-asserted-by":"crossref","unstructured":"Chugh, K., Gupta, P., Dhall, A., & Subramanian, R. (2020). Not made for each other-audio-visual dissonance-based deepfake detection and localization. In Proceedings of the 28th ACM international conference on multimedia (pp. 439\u2013447).","DOI":"10.1145\/3394171.3413700"},{"key":"2249_CR10","unstructured":"Ciftci, U. A., Demir, I., & Yin, L. (2020). Fakecatcher: Detection of synthetic portrait videos using biological signals. IEEE Transactions on Pattern Analysis and Machine Intelligence."},{"key":"2249_CR11","doi-asserted-by":"crossref","unstructured":"Coccomini, D. A., Messina, N., Gennaro, C., & Falchi, F. (2022). Combining efficientnet and vision transformers for video deepfake detection. In International conference on image analysis and processing (pp. 219\u2013229). Springer.","DOI":"10.1007\/978-3-031-06433-3_19"},{"issue":"1","key":"2249_CR12","doi-asserted-by":"crossref","first-page":"19","DOI":"10.1007\/s10479-005-5724-z","volume":"134","author":"PT De Boer","year":"2005","unstructured":"De Boer, P. T., Kroese, D. P., Mannor, S., & Rubinstein, R. Y. (2005). A tutorial on the cross-entropy method. Annals of Operations Research, 134(1), 19\u201367.","journal-title":"Annals of Operations Research"},{"key":"2249_CR13","unstructured":"Dolhansky, B., Bitton, J., Pflaum, B., Lu, J., Howes, R., Wang, M., & Ferrer, C. C. (2020). The deepfake detection challenge (dfdc) dataset. arXiv preprint arXiv:2006.07397."},
{"key":"2249_CR14","unstructured":"Dufour, N., & Gully, A. (2019). Contributing data to deepfake detection research. https:\/\/ai.googleblog.com\/2019\/09\/contributing-data-to-deepfake-detection.html."},{"key":"2249_CR15","doi-asserted-by":"crossref","unstructured":"Fernandes, S., Raj, S., Ortiz, E., Vintila, I., Salter, M., Urosevic, G., & Jha, S. (2019). Predicting heart rate variations of deepfake videos using neural ode. In Proceedings of the IEEE international conference on computer vision workshops (pp. 0\u20130).","DOI":"10.1109\/ICCVW.2019.00213"},{"key":"2249_CR16","unstructured":"Frank, J., Eisenhofer, T., Sch\u00f6nherr, L., Fischer, A., Kolossa, D., & Holz, T. (2020). Leveraging frequency analysis for deep fake image recognition. In International conference on machine learning (pp. 3247\u20133258). PMLR."},{"key":"2249_CR17","unstructured":"Ganiyusufoglu, I., Ng\u00f4, L. M., Savov, N., Karaoglu, S., & Gevers, T. (2020). Spatio-temporal features for generalized detection of deepfake videos. arXiv preprint arXiv:2010.11844."},{"key":"2249_CR18","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., & Bengio, Y. (2014). Generative adversarial nets. Advances in neural information processing systems 27."},{"key":"2249_CR19","doi-asserted-by":"crossref","unstructured":"Gu, Q., Chen, S., Yao, T., Chen, Y., Ding, S., & Yi, R. (2022). Exploiting fine-grained face forgery clues via progressive enhancement learning. In Proceedings of the AAAI conference on artificial intelligence.","DOI":"10.1609\/aaai.v36i1.19954"},{"key":"2249_CR20","doi-asserted-by":"crossref","unstructured":"Gu, Z., Chen, Y., Yao, T., Ding, S., Li, J., Huang, F., & Ma, L. (2021). Spatiotemporal inconsistency learning for deepfake video detection. In Proceedings of the 29th ACM international conference on multimedia (pp. 3473\u20133481).","DOI":"10.1145\/3474085.3475508"},
{"key":"2249_CR21","doi-asserted-by":"crossref","unstructured":"Gu, Z., Chen, Y., Yao, T., Ding, S., Li, J., & Ma, L. (2022). Delving into the local: Dynamic inconsistency learning for deepfake video detection. In Proceedings of the AAAI conference on artificial intelligence.","DOI":"10.1609\/aaai.v36i1.19955"},{"key":"2249_CR22","doi-asserted-by":"crossref","unstructured":"G\u00fcera, D., & Delp, E. J. (2018). Deepfake video detection using recurrent neural networks. In 2018 15th IEEE international conference on advanced video and signal based surveillance (AVSS) (pp. 1\u20136). IEEE.","DOI":"10.1109\/AVSS.2018.8639163"},{"key":"2249_CR23","doi-asserted-by":"crossref","unstructured":"Haliassos, A., Vougioukas, K., Petridis, S., & Pantic, M. (2021). Lips don\u2019t lie: A generalisable and robust approach to face forgery detection. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 5039\u20135049).","DOI":"10.1109\/CVPR46437.2021.00500"},{"key":"2249_CR24","doi-asserted-by":"crossref","unstructured":"He, P., Li, H., & Wang, H. (2019). Detection of fake images via the ensemble of deep representations from multi color spaces. In 2019 IEEE international conference on image processing (ICIP) (pp. 2299\u20132303). IEEE.","DOI":"10.1109\/ICIP.2019.8803740"},{"key":"2249_CR25","doi-asserted-by":"crossref","unstructured":"Jiang, L., Li, R., Wu, W., Qian, C., & Loy, C. C. (2020). Deeperforensics-1.0: A large-scale dataset for real-world face forgery detection. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 2889\u20132898).","DOI":"10.1109\/CVPR42600.2020.00296"},
{"issue":"7","key":"2249_CR26","doi-asserted-by":"crossref","first-page":"1678","DOI":"10.1007\/s11263-022-01606-8","volume":"130","author":"F Juefei-Xu","year":"2022","unstructured":"Juefei-Xu, F., Wang, R., Huang, Y., Guo, Q., Ma, L., & Liu, Y. (2022). Countering malicious deepfakes: Survey, battleground, and horizon. International Journal of Computer Vision, 130(7), 1678\u20131734.","journal-title":"International Journal of Computer Vision"},{"key":"2249_CR27","unstructured":"Kingma, D. P., Ba, J. (2014). Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980."},{"key":"2249_CR28","unstructured":"Li, J., Xie, H., Yu, L., Gao, X., & Zhang, Y. (2021). Discriminative feature mining based on frequency information and metric learning for face forgery detection. IEEE Transactions on Knowledge and Data Engineering."},{"key":"2249_CR29","doi-asserted-by":"crossref","unstructured":"Li, L., Bao, J., Zhang, T., Yang, H., Chen, D., Wen, F., & Guo, B. (2020). Face x-ray for more general face forgery detection. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 5001\u20135010).","DOI":"10.1109\/CVPR42600.2020.00505"},{"key":"2249_CR30","doi-asserted-by":"crossref","unstructured":"Li, X., Lang, Y., Chen, Y., Mao, X., He, Y., Wang, S., Xue, H., & Lu, Q. (2020). Sharp multiple instance learning for deepfake video detection. In Proceedings of the 28th ACM international conference on multimedia (pp. 1864\u20131872).","DOI":"10.1145\/3394171.3414034"},{"key":"2249_CR31","doi-asserted-by":"crossref","unstructured":"Li, Y., Chang, M. C., & Lyu, S. (2018). In ictu oculi: Exposing ai created fake videos by detecting eye blinking. In 2018 IEEE international workshop on information forensics and security (WIFS) (pp. 1\u20137). IEEE.","DOI":"10.1109\/WIFS.2018.8630787"},{"key":"2249_CR32","unstructured":"Li, Y., & Lyu, S. (2019). Exposing deepfake videos by detecting face warping artifacts. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops (pp. 46\u201352)."},
{"key":"2249_CR33","doi-asserted-by":"crossref","unstructured":"Li, Y., Yang, X., Sun, P., Qi, H., & Lyu, S. (2020). Celeb-df: A large-scale challenging dataset for deepfake forensics. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 3207\u20133216).","DOI":"10.1109\/CVPR42600.2020.00327"},{"key":"2249_CR34","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, X., Zhou, W., Chen, Y., He, Y., Xue, H., Zhang, W., & Yu, N. (2021). Spatial-phase shallow learning: rethinking face forgery detection in frequency domain. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 772\u2013781).","DOI":"10.1109\/CVPR46437.2021.00083"},{"key":"2249_CR35","doi-asserted-by":"crossref","first-page":"202","DOI":"10.1007\/s11263-013-0688-y","volume":"110","author":"S Lyu","year":"2014","unstructured":"Lyu, S., Pan, X., & Zhang, X. (2014). Exposing region splicing forgeries with blind local noise estimation. International Journal of Computer Vision, 110, 202\u2013221.","journal-title":"International Journal of Computer Vision"},{"key":"2249_CR36","unstructured":"Maaten, L. v. d., & Hinton, G. (2008). Visualizing data using t-sne. Journal of machine learning research9(Nov), 2579\u20132605."},{"key":"2249_CR37","doi-asserted-by":"crossref","unstructured":"Masi, I., Killekar, A., Mascarenhas, R. M., Gurudatt, S. P., & AbdAlmageed, W. (2020). Two-branch recurrent network for isolating deepfakes in videos. In European conference on computer vision (pp. 667\u2013684). Springer.","DOI":"10.1007\/978-3-030-58571-6_39"},{"key":"2249_CR38","doi-asserted-by":"crossref","unstructured":"Matern, F., Riess, C., & Stamminger, M. (2019). Exploiting visual artifacts to expose deepfakes and face manipulations. In 2019 IEEE winter applications of computer vision workshops (WACVW) (pp. 83\u201392). IEEE.","DOI":"10.1109\/WACVW.2019.00020"},
{"key":"2249_CR39","doi-asserted-by":"crossref","unstructured":"McCloskey, S., & Albright, M. (2019). Detecting gan-generated imagery using saturation cues. In 2019 IEEE international conference on image processing (ICIP) (pp. 4584\u20134588). IEEE.","DOI":"10.1109\/ICIP.2019.8803661"},{"key":"2249_CR40","doi-asserted-by":"crossref","unstructured":"Mittal, T., Bhattacharya, U., Chandra, R., Bera, A., & Manocha, D. (2020). Emotions don\u2019t lie: An audio-visual deepfake detection method using affective cues. In Proceedings of the 28th ACM international conference on multimedia (pp. 2823\u20132832).","DOI":"10.1145\/3394171.3413570"},{"key":"2249_CR41","doi-asserted-by":"crossref","unstructured":"Nguyen, H. H., Yamagishi, J., & Echizen, I. (2019). Capsule-forensics: Using capsule networks to detect forged images and videos. In ICASSP 2019-2019 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp. 2307\u20132311). IEEE.","DOI":"10.1109\/ICASSP.2019.8682602"},{"key":"2249_CR42","doi-asserted-by":"crossref","unstructured":"Qi, H., Guo, Q., Juefei-Xu, F., Xie, X., Ma, L., Feng, W., Liu, Y., & Zhao, J. (2020). Deeprhythm: Exposing deepfakes with attentional visual heartbeat rhythms. In Proceedings of the 28th ACM international conference on multimedia (pp. 4318\u20134327).","DOI":"10.1145\/3394171.3413707"},{"key":"2249_CR43","doi-asserted-by":"crossref","unstructured":"Qian, Y., Yin, G., Sheng, L., Chen, Z., Shao, J. (2020). Thinking in frequency: Face forgery detection by mining frequency-aware clues. In European conference on computer vision (pp. 86\u2013103). Springer.","DOI":"10.1007\/978-3-030-58610-2_6"},{"key":"2249_CR44","doi-asserted-by":"crossref","unstructured":"Rossler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., & Nie\u00dfner, M. (2019). Faceforensics++: Learning to detect manipulated facial images. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 1\u201311).","DOI":"10.1109\/ICCV.2019.00009"},
{"issue":"1","key":"2249_CR45","first-page":"80","volume":"3","author":"E Sabir","year":"2019","unstructured":"Sabir, E., Cheng, J., Jaiswal, A., AbdAlmageed, W., Masi, I., & Natarajan, P. (2019). Recurrent convolutional strategies for face manipulation detection in videos. Interfaces (GUI), 3(1), 80\u201387.","journal-title":"Interfaces (GUI)"},{"key":"2249_CR46","doi-asserted-by":"crossref","unstructured":"Selvaraju, R. R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., & Batra, D. (2017). Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision (pp. 618\u2013626).","DOI":"10.1109\/ICCV.2017.74"},{"key":"2249_CR47","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2021.107950","volume":"116","author":"Z Shang","year":"2021","unstructured":"Shang, Z., Xie, H., Zha, Z., Yu, L., Li, Y., & Zhang, Y. (2021). Prrnet: Pixel-region relation network for face forgery detection. Pattern Recognition, 116, 107950.","journal-title":"Pattern Recognition"},{"key":"2249_CR48","doi-asserted-by":"crossref","unstructured":"Shiohara, K., & Yamasaki, T. (2022). Detecting deepfakes with self-blended images. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 18720\u201318729).","DOI":"10.1109\/CVPR52688.2022.01816"},{"key":"2249_CR49","doi-asserted-by":"crossref","unstructured":"Sun, K., Liu, H., Ye, Q., Liu, J., Gao, Y., Shao, L., & Ji, R. (2021). Domain general face forgery detection by learning to weight. In Proceedings of the AAAI conference on artificial intelligence (vol.\u00a035, pp. 2638\u20132646).","DOI":"10.1609\/aaai.v35i3.16367"},
{"issue":"4","key":"2249_CR50","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3306346.3323035","volume":"38","author":"J Thies","year":"2019","unstructured":"Thies, J., Zollh\u00f6fer, M., & Nie\u00dfner, M. (2019). Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4), 1\u201312.","journal-title":"ACM Transactions on Graphics (TOG)"},{"key":"2249_CR51","doi-asserted-by":"crossref","unstructured":"Thies, J., Zollhofer, M., Stamminger, M., Theobalt, C., & Nie\u00dfner, M. (2016). Face2face: Real-time face capture and reenactment of rgb videos. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 2387\u20132395).","DOI":"10.1109\/CVPR.2016.262"},{"key":"2249_CR52","doi-asserted-by":"crossref","unstructured":"Wang, R., Juefei-Xu, F., Ma, L., Xie, X., Huang, Y., Wang, J., & Liu, Y. (2019). Fakespotter: A simple yet robust baseline for spotting ai-synthesized fake faces. arXiv pp. arXiv\u20131909.","DOI":"10.24963\/ijcai.2020\/476"},{"key":"2249_CR53","doi-asserted-by":"crossref","unstructured":"Wang, S. Y., Wang, O., Zhang, R., Owens, A., Efros, A. A. (2020). Cnn-generated images are surprisingly easy to spot... for now. In Proceedings of the IEEE conference on computer vision and pattern recognition (vol.\u00a07).","DOI":"10.1109\/CVPR42600.2020.00872"},{"key":"2249_CR54","doi-asserted-by":"crossref","unstructured":"Xu, Y., Liang, J., Jia, G., Yang, Z., Zhang, Y., & He, R. (2023). Tall: Thumbnail layout for deepfake video detection. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 22658\u201322668).","DOI":"10.1109\/ICCV51070.2023.02071"},{"key":"2249_CR55","doi-asserted-by":"crossref","unstructured":"Yang, X., Li, Y., & Lyu, S. (2019). Exposing deep fakes using inconsistent head poses. In ICASSP 2019-2019 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp 8261\u20138265). IEEE.","DOI":"10.1109\/ICASSP.2019.8683164"},
{"key":"2249_CR56","doi-asserted-by":"crossref","first-page":"71","DOI":"10.1007\/s11263-010-0403-1","volume":"92","author":"I Yerushalmy","year":"2011","unstructured":"Yerushalmy, I., & Hel-Or, H. (2011). Digital image forgery detection based on lens and sensor aberration. International Journal of Computer Vision, 92, 71\u201391.","journal-title":"International Journal of Computer Vision"},{"key":"2249_CR57","doi-asserted-by":"crossref","unstructured":"Yin, Q., Lu, W., Li, B., & Huang, J. (2023). Dynamic difference learning with spatio-temporal correlation for deepfake video detection. IEEE Transactions on Information Forensics and Security.","DOI":"10.1109\/TIFS.2023.3290752"},{"key":"2249_CR58","unstructured":"Yosinski, J., Clune, J., Bengio, Y., & Lipson, H. (2014). How transferable are features in deep neural networks? In Advances in neural information processing systems (pp. 3320\u20133328)."},{"key":"2249_CR59","doi-asserted-by":"crossref","unstructured":"Yu, N., Davis, L. S., & Fritz, M. (2019). Attributing fake images to gans: Learning and analyzing gan fingerprints. In Proceedings of the IEEE international conference on computer vision (pp. 7556\u20137566).","DOI":"10.1109\/ICCV.2019.00765"},{"issue":"4","key":"2249_CR60","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3499026","volume":"18","author":"Y Yu","year":"2022","unstructured":"Yu, Y., Ni, R., Li, W., & Zhao, Y. (2022). Detection of ai-manipulated fake faces via mining generalized features. ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM), 18(4), 1\u201323.","journal-title":"ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM)"},{"key":"2249_CR61","doi-asserted-by":"crossref","unstructured":"Zhang, D., Li, C., Lin, F., Zeng, D., & Ge, S. (2021). Detecting deepfake videos with temporal dropout 3dcnn. In IJCAI.","DOI":"10.24963\/ijcai.2021\/178"},
{"key":"2249_CR62","doi-asserted-by":"crossref","first-page":"845","DOI":"10.1007\/s11263-019-01175-3","volume":"127","author":"H Zhang","year":"2019","unstructured":"Zhang, H., Riggan, B. S., Hu, S., Short, N. J., & Patel, V. M. (2019). Synthesis of high-quality visible faces from polarimetric thermal faces using generative adversarial networks. International Journal of Computer Vision, 127, 845\u2013862.","journal-title":"International Journal of Computer Vision"},{"issue":"10","key":"2249_CR63","doi-asserted-by":"crossref","first-page":"1499","DOI":"10.1109\/LSP.2016.2603342","volume":"23","author":"K Zhang","year":"2016","unstructured":"Zhang, K., Zhang, Z., Li, Z., & Qiao, Y. (2016). Joint face detection and alignment using multitask cascaded convolutional networks. IEEE Signal Processing Letters, 23(10), 1499\u20131503.","journal-title":"IEEE Signal Processing Letters"},{"key":"2249_CR64","doi-asserted-by":"crossref","first-page":"1335","DOI":"10.1109\/TIFS.2023.3239223","volume":"18","author":"C Zhao","year":"2023","unstructured":"Zhao, C., Wang, C., Hu, G., Chen, H., Liu, C., & Tang, J. (2023). Istvt: Interpretable spatial-temporal video transformer for deepfake detection. IEEE Transactions on Information Forensics and Security, 18, 1335\u20131348.","journal-title":"IEEE Transactions on Information Forensics and Security"},{"key":"2249_CR65","doi-asserted-by":"crossref","unstructured":"Zhao, H., Zhou, W., Chen, D., Wei, T., Zhang, W., & Yu, N. (2021). Multi-attentional deepfake detection. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 2185\u20132194).","DOI":"10.1109\/CVPR46437.2021.00222"},{"key":"2249_CR66","doi-asserted-by":"crossref","first-page":"2514","DOI":"10.1007\/s11263-020-01328-9","volume":"128","author":"L Zhao","year":"2020","unstructured":"Zhao, L., Peng, X., Tian, Y., Kapadia, M., & Metaxas, D. N. (2020). Towards image-to-video translation: A structure-aware approach via multi-stage generative adversarial networks. International Journal of Computer Vision, 128, 2514\u20132533.","journal-title":"International Journal of Computer Vision"},
{"key":"2249_CR67","doi-asserted-by":"crossref","unstructured":"Zhao, T., Xu, X., Xu, M., Ding, H., Xiong, Y., & Xia, W. (2021). Learning self-consistency for deepfake detection. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 15023\u201315033).","DOI":"10.1109\/ICCV48922.2021.01475"},{"key":"2249_CR68","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Bao, J., Chen, D., Zeng, M., & Wen, F. (2021). Exploring temporal coherence for more general video face forgery detection. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 15044\u201315054).","DOI":"10.1109\/ICCV48922.2021.01477"},{"key":"2249_CR69","doi-asserted-by":"crossref","unstructured":"Zi, B., Chang, M., Chen, J., Ma, X., & Jiang, Y. G. (2020). Wilddeepfake: A challenging real-world dataset for deepfake detection. In Proceedings of the 28th ACM international conference on multimedia (pp. 2382\u20132390).","DOI":"10.1145\/3394171.3413769"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-024-02249-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-024-02249-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-024-02249-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,30]],"date-time":"2025-03-30T22:09:33Z","timestamp":1743372573000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-024-02249-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,9]]},"references-count":69,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2025,4]]}},"alternative-id":["2249"],"URL":"https:\/\/doi.org\/10.1007\/s11263-024-02249-7","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"
},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,9]]},"assertion":[{"value":"15 September 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 September 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 October 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}