{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T21:25:35Z","timestamp":1765229135202,"version":"3.46.0"},"reference-count":71,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Knowledge-Based Systems"],"published-print":{"date-parts":[[2025,11]]},"DOI":"10.1016\/j.knosys.2025.114738","type":"journal-article","created":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T07:59:50Z","timestamp":1761379190000},"page":"114738","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"PC","title":["Improving vision-language alignment with graph spiking hybrid networks"],"prefix":"10.1016","volume":"330","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0001-0204","authenticated-orcid":false,"given":"Siyu","family":"Zhang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6290-8941","authenticated-orcid":false,"given":"Wenzhe","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0005-5515-1943","authenticated-orcid":false,"given":"Yeming","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0000-5334-4761","authenticated-orcid":false,"given":"Yiming","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0004-9899-4740","authenticated-orcid":false,"given":"Heming","family":"Zheng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2138-6286","authenticated-orcid":false,"given":"Cheng","family":"Cheng","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.knosys.2025.114738_bib0001","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.112827","article-title":"R-VQA: A robust visual question answering model","volume":"309","author":"Chowdhury","year":"2025","journal-title":"Knowl. 
Based Syst."},{"year":"2022","author":"Cao","series-title":"AlignVE: visual entailment recognition based on alignment relations","key":"10.1016\/j.knosys.2025.114738_bib0002"},{"key":"10.1016\/j.knosys.2025.114738_bib0003","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.111550","article-title":"Multiview adaptive attention pooling for image-text retrieval","volume":"291","author":"Ding","year":"2024","journal-title":"Knowl. Based Syst."},{"key":"10.1016\/j.knosys.2025.114738_bib0004","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2021.107408","article-title":"KVL-BERT: knowledge enhanced visual-and-linguistic BERT for visual commonsense reasoning","volume":"230","author":"Song","year":"2021","journal-title":"Knowl. Based Syst."},{"key":"10.1016\/j.knosys.2025.114738_bib0005","series-title":"Proc. IEEE Int. Conf. Comput. Vis. (ICCV)","first-page":"104","article-title":"UNITER: universal image-text representation learning","author":"Chen","year":"2020"},{"key":"10.1016\/j.knosys.2025.114738_bib0006","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"6077","article-title":"Bottom-up and top-down attention for image captioning and visual question answering","author":"Anderson","year":"2018"},{"key":"10.1016\/j.knosys.2025.114738_bib0007","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"10267","article-title":"In defense of grid features for visual question answering","author":"Jiang","year":"2020"},{"year":"2020","author":"Huang","series-title":"Pixel-BERT: Aligning image pixels with text by deep multi-modal transformers","key":"10.1016\/j.knosys.2025.114738_bib0008"},{"key":"10.1016\/j.knosys.2025.114738_bib0009","series-title":"Proc. Int. Conf. Mach. Learn. PMLR","first-page":"5583","article-title":"ViLT: vision-and-language transformer without convolution or region supervision","author":"Kim","year":"2021"},{"key":"10.1016\/j.knosys.2025.114738_bib0010","series-title":"Proc. Int. Conf. Learn. Represent. (ICLR)","article-title":"An image is worth 16x16 words: transformers for image recognition at scale","author":"Dosovitskiy","year":"2021"},{"key":"10.1016\/j.knosys.2025.114738_bib0011","series-title":"Proc. Eur. Conf. Comput. Vis. (ECCV)","first-page":"178","article-title":"Panoptic scene graph generation","author":"Yang","year":"2022"},{"key":"10.1016\/j.knosys.2025.114738_bib0012","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit","first-page":"12976","article-title":"Seeing out of the box: end-to-end pre-training for vision-language representation learning","author":"Huang","year":"2021"},{"key":"10.1016\/j.knosys.2025.114738_bib0013","first-page":"4514","article-title":"Probing inter-modality: visual parsing with self-attention for vision-and-language pre-training","volume":"34","author":"Xue","year":"2021","journal-title":"Proc. AAAI Conf."},{"key":"10.1016\/j.knosys.2025.114738_bib0014","series-title":"Proc. IEEE\/CVF Inter. Conf. Comput. Vis. (ICCV)","first-page":"10012","article-title":"Swin transformer: hierarchical vision transformer using shifted windows","author":"Liu","year":"2021"},{"key":"10.1016\/j.knosys.2025.114738_bib0015","series-title":"Proc. Conf. Empirical Methods Natural Lang. Process. 
(EMNLP)","article-title":"LXMERT: Learning cross-modality encoder representations from transformers","author":"Tan","year":"2019"},{"key":"10.1016\/j.knosys.2025.114738_bib0016","first-page":"3208","article-title":"Ernie-vil: knowledge enhanced vision-language representations through scene graphs","volume":"35","author":"Yu","year":"2021","journal-title":"Proc. AAAI Conf"},{"key":"10.1016\/j.knosys.2025.114738_bib0017","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"10437","article-title":"12-In-1: multitask vision and language representation learning","author":"Lu","year":"2020"},{"key":"10.1016\/j.knosys.2025.114738_bib0018","first-page":"6616","article-title":"Large-scale adversarial training for vision-and-language representation learning","volume":"33","author":"Gan","year":"2020","journal-title":"Proc. Eur. Conf. Comput. Vis. (ECCV)"},{"year":"2021","author":"Li","series-title":"SemVLP: Vision-language pre-training by aligning semantics at multiple levels","key":"10.1016\/j.knosys.2025.114738_bib0019"},{"key":"10.1016\/j.knosys.2025.114738_bib0020","first-page":"8588","article-title":"Scaling up dynamic graph representation learning via spiking neural networks","volume":"37","author":"Li","year":"2023","journal-title":"Proc. AAAI Conf."},{"key":"10.1016\/j.knosys.2025.114738_bib0021","first-page":"11270","article-title":"Spiking-YOLO: spiking neural network for energy-efficient object detection","volume":"34","author":"Kim","year":"2020","journal-title":"Proc. AAAI Conf."},{"key":"10.1016\/j.knosys.2025.114738_bib0022","doi-asserted-by":"crossref","DOI":"10.3389\/fnins.2020.00119","article-title":"Enabling spike-based backpropagation for training deep neural network architectures","volume":"14","author":"Lee","year":"2020","journal-title":"Front. Neurosci."},{"issue":"8","key":"10.1016\/j.knosys.2025.114738_bib0023","doi-asserted-by":"crossref","first-page":"5200","DOI":"10.1109\/TNNLS.2021.3119238","article-title":"Spiking deep residual networks","volume":"34","author":"Hu","year":"2021","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"year":"2022","author":"Zhu","series-title":"Spiking graph convolutional networks","key":"10.1016\/j.knosys.2025.114738_bib0024"},{"key":"10.1016\/j.knosys.2025.114738_bib0025","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"8801","article-title":"Spiking transformers for event-based single object tracking","author":"Zhang","year":"2022"},{"key":"10.1016\/j.knosys.2025.114738_bib0026","series-title":"Proc. IEEE\/CVF Inter. Conf. Comput. Vis. (ICCV)","first-page":"1761","article-title":"Masked spiking transformer","author":"Wang","year":"2023"},{"year":"2022","author":"Zhou","series-title":"Spikformer: When spiking neural network meets transformer","key":"10.1016\/j.knosys.2025.114738_bib0027"},{"key":"10.1016\/j.knosys.2025.114738_bib0028","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. 
(CVPR)","first-page":"3992","article-title":"Convolutional feature masking for joint object and stuff segmentation","author":"Dai","year":"2015"},{"year":"2017","author":"Veli\u010dkovi\u0107","series-title":"Graph attention networks","key":"10.1016\/j.knosys.2025.114738_bib0029"},{"year":"2014","author":"Gerstner","series-title":"Neuronal Dynamics: From Single Neurons to Networks And Models of Cognition","key":"10.1016\/j.knosys.2025.114738_bib0030"},{"key":"10.1016\/j.knosys.2025.114738_bib0031","first-page":"11062","article-title":"Going deeper with directly-trained larger spiking neural networks","volume":"35","author":"Zheng","year":"2021","journal-title":"Proc. AAAI Conf."},{"key":"10.1016\/j.knosys.2025.114738_bib0032","series-title":"Proc. IEEE Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"104","article-title":"Uniter: universal image-text representation learning","author":"Chen","year":"2020"},{"key":"10.1016\/j.knosys.2025.114738_bib0033","series-title":"Proc. Annual Meeting of the ACL","first-page":"4171","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2019"},{"key":"10.1016\/j.knosys.2025.114738_bib0034","series-title":"Proc. IEEE Conf. Comput. Vis. Pattern Recognit","first-page":"248","article-title":"ImageNet: a large-scale hierarchical image database","author":"Deng","year":"2009"},{"issue":"4","key":"10.1016\/j.knosys.2025.114738_bib0035","doi-asserted-by":"crossref","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","article-title":"Deeplab: semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs","volume":"40","author":"Chen","year":"2018","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell"},{"key":"10.1016\/j.knosys.2025.114738_bib0036","series-title":"Proc. IEEE\/CVF Inter. Conf. Comput. Vis. (ICCV)","first-page":"2980","article-title":"Mask R-CNN","author":"He","year":"2017"},{"key":"10.1016\/j.knosys.2025.114738_bib0037","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"6904","article-title":"Making the v in vqa matter: elevating the role of image understanding in visual question answering","author":"Y","year":"2017"},{"key":"10.1016\/j.knosys.2025.114738_bib0038","series-title":"Proc. IEEE Int. Conf. Comput. Vis. (ICCV)","first-page":"2641","article-title":"Flickr30k entities: collecting region-to-phrase correspondences for richer image-to-sentence models","author":"Plummer","year":"2015"},{"key":"10.1016\/j.knosys.2025.114738_bib0039","first-page":"32","article-title":"Visual genome: connecting language and vision using crowdsourced dense image annotations","volume":"123","author":"Krishna","year":"2017","journal-title":"Proc"},{"key":"10.1016\/j.knosys.2025.114738_bib0040","series-title":"Proc. Eur. Conf. Comput. Vis","first-page":"740","article-title":"Microsoft COCO: common objects in context","author":"Lin","year":"2014"},{"issue":"1","key":"10.1016\/j.knosys.2025.114738_bib0041","doi-asserted-by":"crossref","first-page":"81","DOI":"10.1109\/TAI.2022.3160418","article-title":"Dual attention and question categorization-based visual question answering","volume":"4","author":"Mishra","year":"2023","journal-title":"IEEE Trans. Artif. 
Intell."},{"key":"10.1016\/j.knosys.2025.114738_bib0042","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2024.125817","article-title":"Towards bias-aware visual question answering: rectifying and mitigating comprehension biases","volume":"264","author":"Chen","year":"2025","journal-title":"Expert Syst. Appl."},{"key":"10.1016\/j.knosys.2025.114738_bib0043","series-title":"Proc. Int. Conf. Mach. Learn. PMLR","first-page":"1931","article-title":"Unifying vision-and-language tasks via text generation","author":"Cho","year":"2021"},{"issue":"3","key":"10.1016\/j.knosys.2025.114738_bib0044","doi-asserted-by":"crossref","first-page":"1380","DOI":"10.1109\/TNNLS.2021.3105284","article-title":"Multitask learning for visual question answering","volume":"34","author":"Ma","year":"2023","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.knosys.2025.114738_bib0045","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2023.110706","article-title":"CLVIN: Complete language-vision interaction network for visual question answering","volume":"275","author":"Chen","year":"2023","journal-title":"Knowl. Based Syst."},{"issue":"2","key":"10.1016\/j.knosys.2025.114738_bib0046","doi-asserted-by":"crossref","first-page":"1023","DOI":"10.1109\/TNNLS.2021.3104937","article-title":"Bilinear graph networks for visual question answering","volume":"34","author":"Guo","year":"2023","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.knosys.2025.114738_bib0047","series-title":"Proc","first-page":"121","article-title":"Oscar: object-semantics aligned pre-training for vision language tasks","author":"Li","year":"2020"},{"key":"10.1016\/j.knosys.2025.114738_bib0048","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"23242","article-title":"Position-guided text prompt for vision-language pre-training","author":"Wang","year":"2023"},{"key":"10.1016\/j.knosys.2025.114738_bib0049","doi-asserted-by":"crossref","first-page":"6202","DOI":"10.1109\/TMM.2023.3347093","article-title":"LOIS: Looking out of instance semantics for visual question answering","volume":"26","author":"Zhang","year":"2024","journal-title":"IEEE Trans. Multimedia"},{"year":"2024","author":"Li","series-title":"Cumo: Scaling multimodal LLM with co-upcycled mixture-of-experts","key":"10.1016\/j.knosys.2025.114738_bib0050"},{"key":"10.1016\/j.knosys.2025.114738_bib0051","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"5575","article-title":"VinVL: revisiting visual representations in vision language models","author":"Zhang","year":"2021"},{"issue":"4","key":"10.1016\/j.knosys.2025.114738_bib0052","first-page":"1644","article-title":"Answer again: improving VQA with cascaded-answering model","volume":"34","author":"Peng","year":"2022","journal-title":"IEEE Trans. Knowl. Data Eng."},{"issue":"1","key":"10.1016\/j.knosys.2025.114738_bib0053","doi-asserted-by":"crossref","first-page":"318","DOI":"10.1109\/TPAMI.2020.3004830","article-title":"MRANet: Improving VQA via multi-modal relation attention network","volume":"44","author":"Peng","year":"2022","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.knosys.2025.114738_bib0054","doi-asserted-by":"crossref","first-page":"8609","DOI":"10.1109\/TMM.2024.3380259","article-title":"Cross modality bias in visual question answering: a causal view with possible worlds VQA","volume":"26","author":"Vosoughi","year":"2024","journal-title":"IEEE Trans. 
Multimedia"},{"key":"10.1016\/j.knosys.2025.114738_bib0055","series-title":"Proc. 44th ACM SIGIR Conference on Research and Development in Information Retrieval","first-page":"1955","article-title":"LPF: A language-prior feedback objective function for de-biased visual question answering","author":"Liang","year":"2021"},{"key":"10.1016\/j.knosys.2025.114738_bib0056","series-title":"Proc. IEEE Int. Conf. Comput. Vis. (ICCV)","first-page":"1584","article-title":"Greedy gradient ensemble for robust visual question answering","author":"Han","year":"2021"},{"year":"2023","author":"Lu","series-title":"Lyrics: Boosting Fine-Grained Language-Vision Alignment and Comprehension via Semantic-Aware Visual Objects","key":"10.1016\/j.knosys.2025.114738_bib0057"},{"key":"10.1016\/j.knosys.2025.114738_bib0058","article-title":"Learning by abstraction: the neural state machine","volume":"32","author":"Hudson","year":"2019","journal-title":"Proc"},{"key":"10.1016\/j.knosys.2025.114738_bib0059","series-title":"Proc. IEEE Int. Conf. Comput. Vis. (ICCV)","first-page":"1780","article-title":"MDETR-Modulated detection for end-to-end multimodal understanding","author":"Kamath","year":"2021"},{"key":"10.1016\/j.knosys.2025.114738_bib0060","doi-asserted-by":"crossref","DOI":"10.1016\/j.asoc.2024.111395","article-title":"Perceive, reason, and align: context-guided cross-modal correlation learning for image-text retrieval","volume":"154","author":"Liu","year":"2024","journal-title":"Appl. Soft Comput."},{"key":"10.1016\/j.knosys.2025.114738_bib0061","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"5184","article-title":"ViSTA: vision and scene text aggregation for cross-modal retrieval","author":"Cheng","year":"2022"},{"key":"10.1016\/j.knosys.2025.114738_bib0062","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","first-page":"18166","article-title":"An empirical study of training end-to-end vision-and language transformers","author":"Dou","year":"2022"},{"key":"10.1016\/j.knosys.2025.114738_bib0063","series-title":"Proc. Int. Jt Conf. Neural Networks (IJCNN)","first-page":"1","article-title":"Adapting pre-trained language models to vision-language tasksvia dynamic visual prompting","author":"Huang","year":"2024"},{"key":"10.1016\/j.knosys.2025.114738_bib0064","first-page":"9694","article-title":"Align before fuse: vision and language representation learning with momentum distillation","volume":"34","author":"Li","year":"2021","journal-title":"Proc"},{"key":"10.1016\/j.knosys.2025.114738_bib0065","series-title":"Proc. Int. Conf. Learn. Represent. (ICLR)","article-title":"SimVLM: simple visual language model pretraining with weak supervision","author":"Wang","year":"2022"},{"year":"2022","author":"Yu","series-title":"Coca: contrastive captioners are image-text foundation models","key":"10.1016\/j.knosys.2025.114738_bib0066"},{"key":"10.1016\/j.knosys.2025.114738_bib0067","series-title":"Proc. Int. Conf. Mach. Learn. (ICML)","first-page":"23318","article-title":"OFA: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework","author":"Wang","year":"2022"},{"key":"10.1016\/j.knosys.2025.114738_bib0068","series-title":"Proc. IEEE\/CVF Wint. Conf. Appl. Comput. Vis. 
(WACV)","first-page":"769","article-title":"Improving vision-and-language reasoning via spatial relations modeling","author":"Yang","year":"2024"},{"key":"10.1016\/j.knosys.2025.114738_bib0069","first-page":"13854","article-title":"Implicit differentiable outlier detection enables robust deep multimodal analysis","volume":"36","author":"Wang","year":"2023","journal-title":"Proc"},{"key":"10.1016\/j.knosys.2025.114738_bib0070","series-title":"Proc. Int. Conf. Mach. Learn. PMLR","first-page":"25994","article-title":"Multi-grained vision language pretraining: aligning texts with visual concepts","author":"Zeng","year":"2022"},{"issue":"6","key":"10.1016\/j.knosys.2025.114738_bib0071","doi-asserted-by":"crossref","first-page":"3174","DOI":"10.1109\/TNNLS.2021.3111897","article-title":"Diet-SNN: a low-latency spiking neural network with direct input encoding and leakage and threshold optimization","volume":"34","author":"Rathi","year":"2021","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."}],"container-title":["Knowledge-Based Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0950705125017770?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0950705125017770?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T19:34:29Z","timestamp":1765222469000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0950705125017770"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11]]},"references-count":71,"alternative-id":["S0950705125017770"],"URL":"https:\/\/doi.org\/10.1016\/j.knosys.2025.114738","relation":{},"ISSN":["0950-7051"],"issn-type":[{"type":"print","value":"0950-7051"}],"subject":[],"published":{"date-parts":[[2025,11]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Improving vision-language alignment with graph spiking hybrid networks","name":"articletitle","label":"Article Title"},{"value":"Knowledge-Based Systems","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.knosys.2025.114738","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"114738"}}