{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T00:38:22Z","timestamp":1774917502848,"version":"3.50.1"},"reference-count":47,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2020,4,1]],"date-time":"2020-04-01T00:00:00Z","timestamp":1585699200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2020,4,1]],"date-time":"2020-04-01T00:00:00Z","timestamp":1585699200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"}],"funder":[{"DOI":"10.13039\/501100003995","name":"Natural Science Foundation of Anhui Province","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010814","name":"Anhui Department of Education","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100010814","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neurocomputing"],"published-print":{"date-parts":[[2020,4]]},"DOI":"10.1016\/j.neucom.2020.01.045","type":"journal-article","created":{"date-parts":[[2020,1,14]],"date-time":"2020-01-14T20:34:01Z","timestamp":1579034041000},"page":"210-220","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":50,"special_numbering":"C","title":["A cross-modal adaptive gated fusion generative adversarial network for RGB-D salient object detection"],"prefix":"10.1016","volume":"387","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7782-9568","authenticated-orcid":false,"given":"Zhengyi","family":"Liu","sequence":"first","affiliation":[]},{"given":"Wei","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Peng","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neucom.2020.01.045_bib0001","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"3431","article-title":"Fully convolutional networks for semantic segmentation","author":"Long","year":"2015"},{"key":"10.1016\/j.neucom.2020.01.045_bib0002","series-title":"Proceedings of the 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition","first-page":"586","article-title":"Face recognition using eigenfaces","author":"Turk","year":"1991"},{"issue":"9","key":"10.1016\/j.neucom.2020.01.045_bib0003","doi-asserted-by":"crossref","first-page":"1627","DOI":"10.1109\/TPAMI.2009.167","article-title":"Object detection with discriminatively trained part-based models","volume":"32","author":"Felzenszwalb","year":"2009","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"1\u20133","key":"10.1016\/j.neucom.2020.01.045_bib0004","doi-asserted-by":"crossref","first-page":"125","DOI":"10.1007\/s11263-007-0075-7","article-title":"Incremental learning for robust visual tracking","volume":"77","author":"Ross","year":"2008","journal-title":"Int. J. Comput. Vis."},{"issue":"7","key":"10.1016\/j.neucom.2020.01.045_bib0005","doi-asserted-by":"crossref","first-page":"1655","DOI":"10.1109\/TPAMI.2018.2846566","article-title":"Fine-tuning CNN image retrieval with no human annotation","volume":"41","author":"Radenovi\u0107","year":"2018","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.neucom.2020.01.045_bib0006","series-title":"Proceedings of the Advances in Neural Information Processing systems","first-page":"1097","article-title":"Imagenet classification with deep convolutional neural networks","author":"Krizhevsky","year":"2012"},{"key":"10.1016\/j.neucom.2020.01.045_bib0007","series-title":"Proceedings of the 2014 IEEE International Conference on Image Processing (ICIP)","first-page":"1115","article-title":"Depth saliency based on anisotropic center-surround difference","author":"Ju","year":"2014"},{"key":"10.1016\/j.neucom.2020.01.045_bib0008","series-title":"Proceedings of the 2018 IEEE Twenty-third International Conference on Digital Signal Processing (DSP)","first-page":"1","article-title":"Rgbd salient object detection using spatially coherent deep learning framework","author":"Huang","year":"2018"},{"key":"10.1016\/j.neucom.2020.01.045_bib0009","doi-asserted-by":"crossref","unstructured":"C. Zhu, X. Cai, K. Huang, T.H. Li, G. Li, Pdnet: prior-model guided depth-enhanced network for salient object detection, arXiv:1803.08636(2018).","DOI":"10.1109\/ICME.2019.00042"},{"key":"10.1016\/j.neucom.2020.01.045_bib0010","doi-asserted-by":"crossref","first-page":"55277","DOI":"10.1109\/ACCESS.2019.2913107","article-title":"Adaptive fusion for RGB-D salient object detection","volume":"7","author":"Ningning","year":"2019","journal-title":"IEEE Access"},{"key":"10.1016\/j.neucom.2020.01.045_bib0011","series-title":"Proceedings of the IJCAI","first-page":"3411","article-title":"Saliency transfer: An example-based method for salient object detection.","author":"Li","year":"2016"},{"key":"10.1016\/j.neucom.2020.01.045_bib0012","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"478","article-title":"Deep contrast learning for salient object detection","author":"Li","year":"2016"},{"key":"10.1016\/j.neucom.2020.01.045_bib0013","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"6609","article-title":"Non-local deep features for salient object detection","author":"Luo","year":"2017"},{"key":"10.1016\/j.neucom.2020.01.045_bib0014","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"1395","article-title":"Holistically-nested edge detection","author":"Xie","year":"2015"},{"key":"10.1016\/j.neucom.2020.01.045_bib0015","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"3203","article-title":"Deeply supervised salient object detection with short connections","author":"Hou","year":"2017"},{"key":"10.1016\/j.neucom.2020.01.045_bib0016","series-title":"Proceedings of the Twenty-fifth ACM international conference on Multimedia","first-page":"439","article-title":"Multi-scale cascade network for salient object detection","author":"Li","year":"2017"},{"key":"10.1016\/j.neucom.2020.01.045_bib0017","series-title":"Proceedings of the European Conference on Computer Vision (ECCV)","first-page":"355","article-title":"Contour knowledge transfer for salient object detection","author":"Li","year":"2018"},{"key":"10.1016\/j.neucom.2020.01.045_bib0018","doi-asserted-by":"crossref","first-page":"48890","DOI":"10.1109\/ACCESS.2019.2910572","article-title":"Convolutional edge constraint-based u-net for salient object detection","volume":"7","author":"Han","year":"2019","journal-title":"IEEE Access"},{"issue":"7","key":"10.1016\/j.neucom.2020.01.045_bib0019","doi-asserted-by":"crossref","first-page":"3214","DOI":"10.1109\/TNNLS.2017.2727140","article-title":"Patch alignment manifold matting","volume":"29","author":"Li","year":"2017","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.neucom.2020.01.045_bib0020","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops","first-page":"25","article-title":"Exploiting global priors for RGB-D saliency detection","author":"Ren","year":"2015"},{"issue":"11","key":"10.1016\/j.neucom.2020.01.045_bib0021","doi-asserted-by":"crossref","first-page":"3171","DOI":"10.1109\/TCYB.2017.2761775","article-title":"Cnns-based RGB-D saliency detection via cross-view transfer and multiview fusion","volume":"48","author":"Han","year":"2017","journal-title":"IEEE Trans. Cybern."},{"key":"10.1016\/j.neucom.2020.01.045_bib0022","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"3051","article-title":"Progressively complementarity-aware fusion network for RGB-D salient object detection","author":"Chen","year":"2018"},{"key":"10.1016\/j.neucom.2020.01.045_bib0023","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"1","article-title":"Contrast prior and fluid pyramid integration for rgbd salient object detection","author":"Zhao","year":"2019"},{"key":"10.1016\/j.neucom.2020.01.045_bib0024","series-title":"Proceedings of the Advances in Neural Information Processing Systems","first-page":"2672","article-title":"Generative adversarial nets","author":"Goodfellow","year":"2014"},{"key":"10.1016\/j.neucom.2020.01.045_bib0025","unstructured":"M. Mirza, S. Osindero, Conditional generative adversarial nets, arXiv:1411.1784(2014)."},{"key":"10.1016\/j.neucom.2020.01.045_bib0026","doi-asserted-by":"crossref","first-page":"356","DOI":"10.1016\/j.patcog.2018.11.028","article-title":"Simultaneous color-depth super-resolution with conditional generative adversarial networks","volume":"88","author":"Zhao","year":"2019","journal-title":"Pattern Recognit."},{"issue":"9","key":"10.1016\/j.neucom.2020.01.045_bib0027","doi-asserted-by":"crossref","first-page":"2237","DOI":"10.1109\/TMM.2019.2900908","article-title":"Salient object detection using cascaded convolutional neural networks and adversarial learning","volume":"21","author":"Tang","year":"2019","journal-title":"IEEE Trans Multimed."},{"key":"10.1016\/j.neucom.2020.01.045_bib0028","unstructured":"K. Simonyan, A. Zisserman, Very deep convolutional networks for large-scale image recognition, arXiv:1409.1556(2014)."},{"key":"10.1016\/j.neucom.2020.01.045_bib0029","series-title":"Proceedings of the 2009 IEEE Conference on Computer Vision and Pattern Recognition","first-page":"248","article-title":"Imagenet: A large-scale hierarchical image database","author":"Deng","year":"2009"},{"key":"10.1016\/j.neucom.2020.01.045_bib0030","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"4980","article-title":"Rdfnet: RGB-D multi-level residual feature fusion for indoor semantic segmentation","author":"Park","year":"2017"},{"key":"10.1016\/j.neucom.2020.01.045_bib0031","first-page":"2","article-title":"Learning visual representations at scale","volume":"1","author":"Vanhoucke","year":"2014","journal-title":"ICLR Inv. Talk"},{"key":"10.1016\/j.neucom.2020.01.045_bib0032","unstructured":"L. Sifre, S. Mallat, Rigid-motion scattering for image classification, Ph. D. dissertation (2014)."},{"key":"10.1016\/j.neucom.2020.01.045_bib0033","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"770","article-title":"Deep residual learning for image recognition","author":"He","year":"2016"},{"key":"10.1016\/j.neucom.2020.01.045_bib0034","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"6848","article-title":"Shufflenet: An extremely efficient convolutional neural network for mobile devices","author":"Zhang","year":"2018"},{"key":"10.1016\/j.neucom.2020.01.045_bib0035","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"1251","article-title":"Xception: Deep learning with depthwise separable convolutions","author":"Chollet","year":"2017"},{"key":"10.1016\/j.neucom.2020.01.045_bib0036","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"3029","article-title":"Locality-sensitive deconvolution networks with gated fusion for RGB-D indoor semantic segmentation","author":"Cheng","year":"2017"},{"key":"10.1016\/j.neucom.2020.01.045_bib0037","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"92","article-title":"Rgbd salient object detection: A benchmark and algorithms","author":"Peng","year":"2014"},{"key":"10.1016\/j.neucom.2020.01.045_bib0038","series-title":"Proceedings of the 2012 IEEE Conference on Computer Vision and Pattern Recognition","first-page":"454","article-title":"Leveraging stereopsis for saliency analysis","author":"Niu","year":"2012"},{"key":"10.1016\/j.neucom.2020.01.045_bib0039","series-title":"Proceedings of the ACM ICIMCS","first-page":"23","article-title":"Depth enhanced saliency detection method","author":"Cheng","year":"2014"},{"key":"10.1016\/j.neucom.2020.01.045_bib0040","unstructured":"D.-P. Fan, Z. Lin, J.-X. Zhao, Y. Liu, Z. Zhang, Q. Hou, M. Zhu, M.-M. Cheng, Rethinking RGB-D salient object detection: models, datasets, and large-scale benchmarks, arXiv:1907.06781(2019)."},{"key":"10.1016\/j.neucom.2020.01.045_bib0041","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"4548","article-title":"Structure-measure: A new way to evaluate foreground maps","author":"Fan","year":"2017"},{"key":"10.1016\/j.neucom.2020.01.045_bib0042","series-title":"International Joint Conference on Artificial Intelligence (IJCAI)","first-page":"698","article-title":"Enhanced-alignment measure for binary foreground map evaluation","author":"Fan","year":"2018"},{"key":"10.1016\/j.neucom.2020.01.045_bib0043","article-title":"Tensorflow: Large-scale machine learning on heterogeneous distributed systems","author":"Abadi","year":"2016","journal-title":"CoRR"},{"key":"10.1016\/j.neucom.2020.01.045_bib0044","series-title":"Proceedings of the 2016 IEEE International Conference on Multimedia and Expo (ICME)","first-page":"1","article-title":"Salient object detection for RGB-d image via saliency evolution","author":"Guo","year":"2016"},{"issue":"5","key":"10.1016\/j.neucom.2020.01.045_bib0045","doi-asserted-by":"crossref","first-page":"2274","DOI":"10.1109\/TIP.2017.2682981","article-title":"RGBD salient object detection via deep fusion","volume":"26","author":"Qu","year":"2017","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.neucom.2020.01.045_bib0046","doi-asserted-by":"crossref","first-page":"376","DOI":"10.1016\/j.patcog.2018.08.007","article-title":"Multi-modal fusion network with multi-scale multi-path and cross-modal interactions for RGB-D salient object detection","volume":"86","author":"Chen","year":"2019","journal-title":"Pattern Recognit."},{"issue":"6","key":"10.1016\/j.neucom.2020.01.045_bib0047","doi-asserted-by":"crossref","first-page":"2825","DOI":"10.1109\/TIP.2019.2891104","article-title":"Three-stream attention-aware network for RGB-D salient object detection","volume":"28","author":"Chen","year":"2019","journal-title":"IEEE Trans. Image Process."}],"container-title":["Neurocomputing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231220300904?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231220300904?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,10,13]],"date-time":"2025-10-13T18:58:22Z","timestamp":1760381902000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0925231220300904"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,4]]},"references-count":47,"alternative-id":["S0925231220300904"],"URL":"https:\/\/doi.org\/10.1016\/j.neucom.2020.01.045","relation":{},"ISSN":["0925-2312"],"issn-type":[{"value":"0925-2312","type":"print"}],"subject":[],"published":{"date-parts":[[2020,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"A cross-modal adaptive gated fusion generative adversarial network for RGB-D salient object detection","name":"articletitle","label":"Article Title"},{"value":"Neurocomputing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neucom.2020.01.045","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2020 Elsevier B.V. All rights reserved.","name":"copyright","label":"Copyright"}]}}