{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T16:51:26Z","timestamp":1778086286583,"version":"3.51.4"},"reference-count":68,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100003399","name":"Science and Technology Commission of Shanghai Municipality","doi-asserted-by":"publisher","award":["24511103204"],"award-info":[{"award-number":["24511103204"]}],"id":[{"id":"10.13039\/501100003399","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100007219","name":"Shanghai Municipal Natural Science Foundation","doi-asserted-by":"publisher","award":["25ZR1401273"],"award-info":[{"award-number":["25ZR1401273"]}],"id":[{"id":"10.13039\/100007219","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012472","name":"Shanghai Educational Science Research Project","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012472","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100013106","name":"Shanghai Science International Cooperation Project","doi-asserted-by":"publisher","award":["24170790602"],"award-info":[{"award-number":["24170790602"]}],"id":[{"id":"10.13039\/501100013106","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Expert Systems with Applications"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.eswa.2025.130961","type":"journal-article","created":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T23:47:40Z","timestamp":1767052060000},"page":"130961","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":1,"special_numbering":"C","title":["Towards enterprise-specific question-answering for IT operations and maintenance based on retrieval-augmented generation mechanism"],"prefix":"10.1016","volume":"307","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-9517-1091","authenticated-orcid":false,"given":"Zhuoxuan","family":"Jiang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5088-2222","authenticated-orcid":false,"given":"Tianyang","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-4843-3215","authenticated-orcid":false,"given":"Shengguang","family":"Bai","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0001-7984-4227","authenticated-orcid":false,"given":"Lin","family":"Lin","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0000-1483-6261","authenticated-orcid":false,"given":"Haotian","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0004-1492-8165","authenticated-orcid":false,"given":"Yinong","family":"Xun","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-6297-9018","authenticated-orcid":false,"given":"Jiawei","family":"Ren","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9569-6092","authenticated-orcid":false,"given":"Wen","family":"Si","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9289-3827","authenticated-orcid":false,"given":"Shaohua","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.eswa.2025.130961_bib0001","unstructured":"Achiam, J., Adler, S., Agarwal, S., Ahmad, L., Akkaya, I., Aleman, F. L., Almeida, D., Altenschmidt, J., Altman, S., Anadkat, S. et al. (2023). GPT-4 technical report. arXiv: 2303.08774."},{"key":"10.1016\/j.eswa.2025.130961_bib0002","series-title":"Practical text analytics","author":"Anandarajan","year":"2019"},{"key":"10.1016\/j.eswa.2025.130961_bib0003","series-title":"Bachelor thesis","article-title":"Prompt engineering guidelines for LLMs in requirements engineering","author":"Arvidsson","year":"2023"},{"key":"10.1016\/j.eswa.2025.130961_bib0004","unstructured":"Bai, J., Bai, S., Chu, Y., Cui, Z., Dang, K., Deng, X., Fan, Y., Ge, W., Han, Y., Huang, F. et al. (2023). Qwen technical report. arXiv: 2309.16609."},{"key":"10.1016\/j.eswa.2025.130961_bib0005","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"6","key":"10.1016\/j.eswa.2025.130961_bib0006","doi-asserted-by":"crossref","DOI":"10.1016\/j.patter.2025.101260","article-title":"Unleashing the potential of prompt engineering for large language models","volume":"6","author":"Chen","year":"2025","journal-title":"Patterns"},{"key":"10.1016\/j.eswa.2025.130961_bib0007","doi-asserted-by":"crossref","unstructured":"Chen, J., Xiao, S., Zhang, P., Luo, K., Lian, D., & Liu, Z. (2024). BGE M3-embedding: Multi-lingual, multi-functionality, multi-granularity text embeddings through self-knowledge distillation. arXiv: 2402.03216.","DOI":"10.18653\/v1\/2024.findings-acl.137"},{"key":"10.1016\/j.eswa.2025.130961_bib0008","series-title":"Proceedings of the 37th international conference on machine learning","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","author":"Chen","year":"2020"},{"key":"10.1016\/j.eswa.2025.130961_bib0009","unstructured":"Chen, X., Fan, H., Girshick, R., & He, K. (2020b). Improved baselines with momentum contrastive learning. arXiv: 2003.04297."},{"issue":"240","key":"10.1016\/j.eswa.2025.130961_bib0010","first-page":"1","article-title":"PaLM: Scaling language modeling with pathways","volume":"24","author":"Chowdhery","year":"2023","journal-title":"Journal of Machine Learning Research"},{"key":"10.1016\/j.eswa.2025.130961_bib0011","series-title":"Proceedings of the 2017 ACM SIGSAC conference on computer and communications security","first-page":"1285","article-title":"DeepLog: Anomaly detection and diagnosis from system logs through deep learning","author":"Du","year":"2017"},{"key":"10.1016\/j.eswa.2025.130961_bib0012","series-title":"Proceedings of the 60th annual meeting of the association for computational linguistics (volume 1: Long papers)","first-page":"320","article-title":"GLM: General language model pretraining with autoregressive blank infilling","author":"Du","year":"2022"},{"issue":"9","key":"10.1016\/j.eswa.2025.130961_bib0013","doi-asserted-by":"crossref","first-page":"2841","DOI":"10.1080\/00207543.2019.1671627","article-title":"Cyber-based design for additive manufacturing using artificial neural networks for industry 4.0","volume":"58","author":"Elhoone","year":"2020","journal-title":"International Journal of Production Research"},{"issue":"5","key":"10.1016\/j.eswa.2025.130961_bib0014","doi-asserted-by":"crossref","first-page":"124","DOI":"10.1145\/1506409.1506439","article-title":"An overview of IT service management","volume":"52","author":"Galup","year":"2009","journal-title":"Communications of the ACM"},{"key":"10.1016\/j.eswa.2025.130961_bib0015","doi-asserted-by":"crossref","unstructured":"Gao, T., Yao, X., & Chen, D. (2021). SimCSE: Simple contrastive learning of sentence embeddings. arXiv: 2104.08821.","DOI":"10.18653\/v1\/2021.emnlp-main.552"},{"key":"10.1016\/j.eswa.2025.130961_bib0016","unstructured":"Gao, Y., Xiong, Y., Gao, X., Jia, K., Pan, J., Bi, Y., Dai, Y., Sun, J., & Wang, H. (2023). Retrieval-augmented generation for large language models: A survey. arXiv: 2312.10997."},{"key":"10.1016\/j.eswa.2025.130961_bib0017","series-title":"Proceedings of international conference on database systems for advanced applications","first-page":"490","article-title":"LogLG: Weakly supervised log anomaly detection via log-event graph construction","author":"Guo","year":"2023"},{"key":"10.1016\/j.eswa.2025.130961_bib0018","series-title":"Proceedings of the 12th international conference on learning representations","first-page":"1","article-title":"OWL: A large language model for IT operations","author":"Guo","year":"2024"},{"key":"10.1016\/j.eswa.2025.130961_bib0019","unstructured":"Gupta, A., Shirgaonkar, A., de Luis, B. A., Silva, B., Holstein, D., Li, D., Marsman, J., Nunes, L. O., Rouzbahman, M., Sharp, M. et al. (2024). Rag vs fine-tuning: Pipelines, tradeoffs, and a case study on agriculture. arXiv: 2401.08406."},{"key":"10.1016\/j.eswa.2025.130961_bib0020","doi-asserted-by":"crossref","unstructured":"Gururangan, S., Marasovi\u0107, A., Swayamdipta, S., Lo, K., Beltagy, I., Downey, D., & Smith, N. A. (2020). Don\u2019t stop pretraining: Adapt language models to domains and tasks. arXiv: 2004.10964.","DOI":"10.18653\/v1\/2020.acl-main.740"},{"issue":"1","key":"10.1016\/j.eswa.2025.130961_bib0021","doi-asserted-by":"crossref","first-page":"114","DOI":"10.1177\/1094428120971683","article-title":"Text preprocessing for text mining in organizational research: Review and recommendations","volume":"25","author":"Hickman","year":"2022","journal-title":"Organizational Research Methods"},{"key":"10.1016\/j.eswa.2025.130961_bib0022","unstructured":"Hou, X., Zhao, Y., Liu, Y., Yang, Z., Wang, K., Li, L., Luo, X., Lo, D., Grundy, J., & Wang, H. (2023). Large language models for software engineering: A systematic literature review. arXiv: 2308.10620."},{"key":"10.1016\/j.eswa.2025.130961_bib0023","series-title":"Proceedings of the 36th international conference on machine learning","first-page":"2790","article-title":"Parameter-efficient transfer learning for NLP","author":"Houlsby","year":"2019"},{"key":"10.1016\/j.eswa.2025.130961_bib0024","unstructured":"Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., & Chen, W. (2021). Lora: Low-rank adaptation of large language models. arXiv: 2106.09685."},{"key":"10.1016\/j.eswa.2025.130961_bib0025","series-title":"Proceedings of the 10th international conference on learning representations","first-page":"1","article-title":"LoRA: Low-rank adaptation of large language models","author":"Hu","year":"2022"},{"key":"10.1016\/j.eswa.2025.130961_bib0026","unstructured":"Huang, L., Yu, W., Ma, W., Zhong, W., Feng, Z., Wang, H., Chen, Q., Peng, W., Feng, X., Qin, B. et al. (2023a). A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. arXiv: 2311.05232."},{"key":"10.1016\/j.eswa.2025.130961_bib0027","unstructured":"Huang, S., Liu, Y., Fung, C., Qi, J., Yang, H., & Luan, Z. (2023b). LogQA: Question answering in unstructured logs. arXiv: 2303.11715."},{"key":"10.1016\/j.eswa.2025.130961_bib0028","first-page":"1","article-title":"C-Eval: A multi-level multi-discipline chinese evaluation suite for foundation models","volume":"36","author":"Huang","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"2","key":"10.1016\/j.eswa.2025.130961_bib0029","doi-asserted-by":"crossref","first-page":"191","DOI":"10.4301\/S1807-17752017000200004","article-title":"Proactive management of IT operations to improve IT services","volume":"14","author":"J\u00e4ntti","year":"2017","journal-title":"Journal of Information Systems and Technology Management"},{"issue":"3","key":"10.1016\/j.eswa.2025.130961_bib0030","doi-asserted-by":"crossref","first-page":"535","DOI":"10.1109\/TBDATA.2019.2921572","article-title":"Billion-scale similarity search with GPUs","volume":"7","author":"Johnson","year":"2019","journal-title":"IEEE Transactions on Big Data"},{"key":"10.1016\/j.eswa.2025.130961_bib0031","series-title":"Proceedings of the 2020 conference on empirical methods in natural language processing","first-page":"6769","article-title":"Dense passage retrieval for open-domain question answering","author":"Karpukhin","year":"2020"},{"key":"10.1016\/j.eswa.2025.130961_bib0032","series-title":"Proceedings of the 42nd international conference on machine learning","first-page":"29768","article-title":"SparseloRA: Accelerating LLM fine-tuning with contextual sparsity","author":"Khaki","year":"2025"},{"key":"10.1016\/j.eswa.2025.130961_bib0033","series-title":"Proceedings of the 2021 conference on empirical methods in natural language processing","first-page":"3045","article-title":"The power of scale for parameter-efficient prompt tuning","author":"Lester","year":"2021"},{"key":"10.1016\/j.eswa.2025.130961_bib0034","series-title":"Proceedings of the 59th annual meeting of the association for computational linguistics and the 11th international joint conference on natural language processing (volume 1: Long papers)","first-page":"4582","article-title":"Prefix-tuning: Optimizing continuous prompts for generation","author":"Li","year":"2021"},{"key":"10.1016\/j.eswa.2025.130961_bib0035","unstructured":"Li, Z., Zhang, X., Zhang, Y., Long, D., Xie, P., & Zhang, M. (2023). Towards general text embeddings with multi-stage contrastive learning. arXiv: 2308.03281."},{"key":"10.1016\/j.eswa.2025.130961_bib0036","unstructured":"Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy, G., Lewis, M. et al. (2023). RA-DIT: Retrieval-augmented dual instruction tuning. arXiv: 2310.01352."},{"key":"10.1016\/j.eswa.2025.130961_bib0037","series-title":"Proceedings of the 2024 IEEE\/ACM 46th international conference on software engineering: Companion proceedings","first-page":"364","article-title":"Logprompt: Prompt engineering towards zero-shot and interpretable log analysis","author":"Liu","year":"2024"},{"key":"10.1016\/j.eswa.2025.130961_bib0038","doi-asserted-by":"crossref","unstructured":"Long, L., Wang, R., Xiao, R., Zhao, J., Ding, X., Chen, G., & Wang, H. (2024). On LLMs-driven synthetic data generation, curation, and evaluation: A survey. arXiv: 2406.15126.","DOI":"10.18653\/v1\/2024.findings-acl.658"},{"key":"10.1016\/j.eswa.2025.130961_bib0039","series-title":"Data intelligence and cognitive informatics","first-page":"387","article-title":"Prompt engineering in large language models","author":"Marvin","year":"2024"},{"key":"10.1016\/j.eswa.2025.130961_bib0040","unstructured":"Mohr, I., Krimmel, M., Sturua, S., Akram, M. K., Koukounas, A., G\u00fcnther, M., Mastrapas, G., Ravishankar, V., Mart\u00ednez, J. F., Wang, F. et al. (2024). Multi-task contrastive learning for 8192-token bilingual text embeddings. arXiv: 2402.17016."},{"key":"10.1016\/j.eswa.2025.130961_bib0041","doi-asserted-by":"crossref","unstructured":"Muennighoff, N., Tazi, N., Magne, L., & Reimers, N. (2022). MTEB: Massive text embedding benchmark. arXiv: 2210.07316.","DOI":"10.18653\/v1\/2023.eacl-main.148"},{"key":"10.1016\/j.eswa.2025.130961_bib0042","series-title":"Proceedings of international conference on service-oriented computing","first-page":"110","article-title":"A systematic mapping study in AIOps","author":"Notaro","year":"2020"},{"key":"10.1016\/j.eswa.2025.130961_bib0043","unstructured":"Qu, Y., Ding, Y., Liu, J., Liu, K., Ren, R., Zhao, W. X., Dong, D., Wu, H., & Wang, H. (2020). RocketQA: An optimized training approach to dense passage retrieval for open-domain question answering. arXiv: 2010.08191."},{"key":"10.1016\/j.eswa.2025.130961_bib0044","unstructured":"Quercia, A., Cao, Z., Bangun, A., Paul, R. D., Morrison, A., Assent, I., & Scharr, H. (2025). 1LoRA: Summation compression for very low-rank adaptation. arXiv: 2503.08333."},{"key":"10.1016\/j.eswa.2025.130961_bib0045","series-title":"Artificial Intelligence for Cloud and Edge Computing","first-page":"31","article-title":"AIOps: A multivocal literature review","author":"Rijal","year":"2022"},{"key":"10.1016\/j.eswa.2025.130961_bib0046","series-title":"Conference and labs of the evaluation forum","first-page":"712","article-title":"GPT Hallucination detection through prompt engineering","author":"Siino","year":"2024"},{"key":"10.1016\/j.eswa.2025.130961_bib0047","series-title":"2024 IEEE International symposium on measurements & networking (m&n)","first-page":"1","article-title":"GPT Prompt engineering for scheduling appliances usage for energy cost optimization","author":"Siino","year":"2024"},{"key":"10.1016\/j.eswa.2025.130961_bib0048","series-title":"Conference and labs of the evaluation forum","first-page":"1228","article-title":"Prompt engineering for identifying sexism using GPT mistral 7B","author":"Siino","year":"2024"},{"key":"10.1016\/j.eswa.2025.130961_bib0049","doi-asserted-by":"crossref","DOI":"10.1016\/j.is.2023.102342","article-title":"Is text preprocessing still worth the time? A comparative survey on the influence of popular preprocessing methods on transformers and traditional classifiers","volume":"121","author":"Siino","year":"2024","journal-title":"Information Systems"},{"key":"10.1016\/j.eswa.2025.130961_bib0050","unstructured":"Tastan, N., Laskaridis, S., Takac, M., Nandakumar, K., & Horvath, S. (2025). LoFT: Low-rank adaptation that behaves like full fine-tuning. arXiv: 2505.21289."},{"key":"10.1016\/j.eswa.2025.130961_bib0051","unstructured":"Touvron, H., Martin, L., Stone, K., Albert, P., Almahairi, A., Babaei, Y., Bashlykov, N., Batra, S., Bhargava, P., Bhosale, S. et al. (2023). Llama 2: Open foundation and fine-tuned chat models. arXiv: 2307.09288."},{"issue":"230","key":"10.1016\/j.eswa.2025.130961_bib0052","doi-asserted-by":"crossref","first-page":"9","DOI":"10.15446\/dyna.v90n230.111700","article-title":"Prompt engineering: A methodology for optimizing interactions with AI-language models in the field of engineering","volume":"90","author":"Vel\u00e1squez-Henao","year":"2023","journal-title":"DYNA"},{"key":"10.1016\/j.eswa.2025.130961_bib0053","unstructured":"Wang, B., Ping, W., McAfee, L., Xu, P., Li, B., Shoeybi, M., & Catanzaro, B. (2023a). Instructretro: Instruction tuning post retrieval-augmented pretraining. arXiv: 2310.07713."},{"key":"10.1016\/j.eswa.2025.130961_bib0054","unstructured":"Wang, H., Huang, W., Deng, Y., Wang, R., Wang, Z., Wang, Y., Mi, F., Pan, J. Z., & Wong, K.-F. (2024a). UniMS-RAG: A unified multi-source retrieval-augmented generation for personalized dialogue systems. arXiv: 2401.13256."},{"issue":"1","key":"10.1016\/j.eswa.2025.130961_bib0055","doi-asserted-by":"crossref","first-page":"41","DOI":"10.1038\/s41746-024-01029-4","article-title":"Prompt engineering in consistency and reliability with the evidence-based guideline for LLMs","volume":"7","author":"Wang","year":"2024","journal-title":"NPJ Digital Medicine"},{"key":"10.1016\/j.eswa.2025.130961_bib0056","unstructured":"Wang, L., Yang, N., Huang, X., Jiao, B., Yang, L., Jiang, D., Majumder, R., & Wei, F. (2022). Text embeddings by weakly-supervised contrastive pre-training. arXiv: 2212.03533."},{"key":"10.1016\/j.eswa.2025.130961_bib0057","unstructured":"Wang, Y., Sun, Q., & He, S. (2023b). M3E: Moka massive mixed embedding model. https:\/\/github.com\/wangyuxinwhy\/uniem."},{"key":"10.1016\/j.eswa.2025.130961_bib0058","unstructured":"Wu, S., Huang, Y., Gao, C., Chen, D., Zhang, Q., Wan, Y., Zhou, T., Zhang, X., Gao, J., Xiao, C. et al. (2024). UniGen: A unified framework for textual dataset generation using large language models. arXiv: 2406.18966."},{"key":"10.1016\/j.eswa.2025.130961_bib0059","doi-asserted-by":"crossref","unstructured":"Xiao, S., Liu, Z., Shao, Y., Lian, D., & Xie, X. (2021). Matching-oriented product quantization for Ad-hoc retrieval. arXiv: 2104.07858.","DOI":"10.18653\/v1\/2021.emnlp-main.640"},{"key":"10.1016\/j.eswa.2025.130961_bib0060","unstructured":"Xiao, S., Liu, Z., Zhang, P., & Muennighoff, N. (2023). C-Pack: Packaged resources to advance general Chinese embedding. arXiv: 2309.07597."},{"key":"10.1016\/j.eswa.2025.130961_bib0061","doi-asserted-by":"crossref","unstructured":"Xu, C., Guo, D., Duan, N., & McAuley, J. (2023a). Baize: An open-source chat model with parameter-efficient tuning on self-chat data. arXiv: 2304.01196.","DOI":"10.18653\/v1\/2023.emnlp-main.385"},{"key":"10.1016\/j.eswa.2025.130961_bib0062","unstructured":"Xu, M. (2023). Text2vec: Text to vector toolkit. https:\/\/github.com\/shibing624\/text2vec."},{"key":"10.1016\/j.eswa.2025.130961_bib0063","unstructured":"Xu, P., Ping, W., Wu, X., McAfee, L., Zhu, C., Liu, Z., Subramanian, S., Bakhturina, E., Shoeybi, M., & Catanzaro, B. (2023b). Retrieval meets long context large language models. arXiv: 2310.03025."},{"key":"10.1016\/j.eswa.2025.130961_bib0064","doi-asserted-by":"crossref","unstructured":"Ye, Q., Axmed, M., Pryzant, R., & Khani, F. (2024). Prompt engineering a prompt engineer. arXiv: 2311.05661.","DOI":"10.18653\/v1\/2024.findings-acl.21"},{"key":"10.1016\/j.eswa.2025.130961_bib0065","unstructured":"Zeng, A., Liu, X., Du, Z., Wang, Z., Lai, H., Ding, M., Yang, Z., Xu, Y., Zheng, W., Xia, X. et al. (2022). GLM-130B: An open bilingual pre-trained model. arXiv: 2210.02414."},{"key":"10.1016\/j.eswa.2025.130961_bib0066","unstructured":"Zhang, P., Xiao, S., Liu, Z., Dou, Z., & Nie, J.-Y. (2023). Retrieve anything to augment large language models. arXiv: 2310.07554."},{"key":"10.1016\/j.eswa.2025.130961_bib0067","unstructured":"Zhang, T., Patil, S. G., Jain, N., Shen, S., Zaharia, M., Stoica, I., & Gonzalez, J. E. (2024). RAFT: Adapting language model to domain specific rag. arXiv: 2403.10131."},{"key":"10.1016\/j.eswa.2025.130961_bib0068","first-page":"46595","article-title":"Judging LLM-as-a-judge with MT-bench and chatbot arena","volume":"36","author":"Zheng","year":"2024","journal-title":"Advances in Neural Information Processing Systems"}],"container-title":["Expert Systems with Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0957417425045762?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0957417425045762?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T01:03:28Z","timestamp":1777597408000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0957417425045762"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":68,"alternative-id":["S0957417425045762"],"URL":"https:\/\/doi.org\/10.1016\/j.eswa.2025.130961","relation":{},"ISSN":["0957-4174"],"issn-type":[{"value":"0957-4174","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Towards enterprise-specific question-answering for IT operations and maintenance based on retrieval-augmented generation mechanism","name":"articletitle","label":"Article Title"},{"value":"Expert Systems with Applications","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.eswa.2025.130961","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"130961"}}