{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T21:31:18Z","timestamp":1743111078785,"version":"3.40.3"},"publisher-location":"Cham","reference-count":36,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031781216"},{"type":"electronic","value":"9783031781223"}],"license":[{"start":{"date-parts":[[2024,12,5]],"date-time":"2024-12-05T00:00:00Z","timestamp":1733356800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,5]],"date-time":"2024-12-05T00:00:00Z","timestamp":1733356800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-78122-3_26","type":"book-chapter","created":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T07:16:45Z","timestamp":1733296605000},"page":"407-423","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["$$E^2DAS$$: An Efficient Equivariant Dynamic Aggregation Saliency Model for\u00a0Omnidirectional 
Images"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9843-5269","authenticated-orcid":false,"given":"Nana","family":"Zhang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-6862-9908","authenticated-orcid":false,"given":"Qian","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6436-7088","authenticated-orcid":false,"given":"Dandan","family":"Zhu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5773-5089","authenticated-orcid":false,"given":"Kun","family":"Zhu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8165-9322","authenticated-orcid":false,"given":"Guangtao","family":"Zhai","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4029-3322","authenticated-orcid":false,"given":"Xiaokang","family":"Yang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,5]]},"reference":[{"key":"26_CR1","doi-asserted-by":"crossref","unstructured":"Abdelaziz, Y., Djilali, D., Krishna, T., McGuinness, K., O\u2019Connor, N.E.: Rethinking $$360^{\\circ }$$ image visual attention modelling with unsupervised learning. In:2021 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 15394\u201315404 (2021)","DOI":"10.1109\/ICCV48922.2021.01513"},{"key":"26_CR2","doi-asserted-by":"publisher","first-page":"53","DOI":"10.1016\/j.image.2018.03.008","volume":"69","author":"F Battisti","year":"2018","unstructured":"Battisti, F., Baldoni, S., Brizzi, M., Carli, M.: A feature-based approach for saliency estimation of omni-directional images. Signal Process. Image Commun. 69, 53\u201359 (2018)","journal-title":"Signal Process. 
Image Commun."},{"key":"26_CR3","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1109\/TPAMI.2018.2815601","volume":"41","author":"Z Bylinskii","year":"2016","unstructured":"Bylinskii, Z., Judd, T., Oliva, A., Torralba, A., Durand, F.: What do different evaluation metrics tell us about saliency models? IEEE Trans. Pattern Anal. Mach. Intell. 41, 740\u2013757 (2016)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"26_CR4","doi-asserted-by":"crossref","unstructured":"Chao, F.Y., Zhang, L., Hamidouche, W., D\u00e9forges, O.: Salgan360: Visual saliency prediction on 360 degree images with generative adversarial networks. In: 2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW), pp. 01\u201304 (2018)","DOI":"10.1109\/ICMEW.2018.8551543"},{"key":"26_CR5","doi-asserted-by":"crossref","unstructured":"Chen, D., Qing, C., Xu, X., Zhu, H.: Salbinet360: saliency prediction on $$360^{\\circ }$$ images with local-global bifurcated deep network. In: 2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 92\u2013100 (2020)","DOI":"10.1109\/VR46266.2020.00027"},{"key":"26_CR6","doi-asserted-by":"crossref","unstructured":"Chen, Y., Dai, X., Liu, M., Chen, D., Yuan, L., Liu, Z.: Dynamic convolution: Attention over convolution kernels. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11027\u201311036 (2019)","DOI":"10.1109\/CVPR42600.2020.01104"},{"key":"26_CR7","doi-asserted-by":"publisher","first-page":"569","DOI":"10.1109\/TPAMI.2014.2345401","volume":"37","author":"MM Cheng","year":"2015","unstructured":"Cheng, M.M., Mitra, N.J., Huang, X., Torr, P.H.S., Hu, S.: Global contrast based salient region detection. IEEE Trans. Pattern Anal. Mach. Intell. 37, 569\u2013582 (2015)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"26_CR8","unstructured":"Cohen, T., Welling, M.: Group equivariant convolutional networks. 
In: Proceedings of The 33rd International Conference on Machine Learning. Proceedings of Machine Learning Research, vol.\u00a048, pp. 2990\u20132999. New York, New York, USA (20\u201322 Jun 2016)"},{"key":"26_CR9","doi-asserted-by":"crossref","unstructured":"Cornia, M., Baraldi, L., Serra, G., Cucchiara, R.: A deep multi-level network for saliency prediction. In: 2016 23rd International Conference on Pattern Recognition (ICPR), pp. 3488\u20133493 (2016)","DOI":"10.1109\/ICPR.2016.7900174"},{"key":"26_CR10","doi-asserted-by":"publisher","first-page":"5142","DOI":"10.1109\/TIP.2018.2851672","volume":"27","author":"M Cornia","year":"2016","unstructured":"Cornia, M., Baraldi, L., Serra, G., Cucchiara, R.: Predicting human eye fixations via an lstm-based saliency attentive model. IEEE Trans. Image Process. 27, 5142\u20135154 (2016)","journal-title":"IEEE Trans. Image Process."},{"key":"26_CR11","doi-asserted-by":"crossref","unstructured":"Goodfellow, I.J., Pouget-Abadie, J., Mirza, M., Xu, B., et al.: Generative adversarial networks. Commun. ACM 63, 139\u2013144 (2014)","DOI":"10.1145\/3422622"},{"key":"26_CR12","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"26_CR13","unstructured":"He, L., Chen, Y., shen, z., Dong, Y., Wang, Y., Lin, Z.: Efficient equivariant network. In: Advances in Neural Information Processing Systems, vol.\u00a034, pp. 5290\u20135302 (2021)"},{"key":"26_CR14","doi-asserted-by":"crossref","unstructured":"He, S., Tavakoli, H.R., Borji, A., Mi, Y., Pugeault, N.: Understanding and visualizing deep visual saliency models. 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 
10198\u201310207 (2019)","DOI":"10.1109\/CVPR.2019.01045"},{"key":"26_CR15","doi-asserted-by":"crossref","unstructured":"Howard, A.G., Sandler, M., et\u00a0al., G.C.: Searching for mobilenetv3. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 1314\u20131324 (2019)","DOI":"10.1109\/ICCV.2019.00140"},{"key":"26_CR16","unstructured":"Howard, A.G., et al.: Mobilenets: Efficient convolutional neural networks for mobile vision applications. CoRR abs\/1704.04861 (2017)"},{"key":"26_CR17","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7132\u20137141 (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"26_CR18","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der\u00a0Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2261\u20132269 (2017)","DOI":"10.1109\/CVPR.2017.243"},{"key":"26_CR19","doi-asserted-by":"crossref","unstructured":"Huang, X., Shen, C., Boix, X., Zhao, Q.: Salicon: reducing the semantic gap in saliency prediction by adapting deep neural networks. In: 2015 IEEE International Conference on Computer Vision (ICCV), pp. 262\u2013270 (2015)","DOI":"10.1109\/ICCV.2015.38"},{"key":"26_CR20","doi-asserted-by":"crossref","unstructured":"Jiang, M., Huang, S., Duan, J., Zhao, Q.: Salicon: saliency in context. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1072\u20131080 (2015)","DOI":"10.1109\/CVPR.2015.7298710"},{"issue":"6","key":"26_CR21","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. Commun. ACM 60(6), 84\u201390 (2017)","journal-title":"Commun. 
ACM"},{"key":"26_CR22","doi-asserted-by":"publisher","first-page":"69","DOI":"10.1016\/j.image.2018.03.006","volume":"69","author":"PR Lebreton","year":"2018","unstructured":"Lebreton, P.R., Raake, A.: Gbvs360, bms360, prosal: extending existing saliency prediction models from 2d to omnidirectional images. Signal Process. Image Commun. 69, 69\u201378 (2018)","journal-title":"Signal Process. Image Commun."},{"key":"26_CR23","doi-asserted-by":"publisher","first-page":"60","DOI":"10.1016\/j.image.2018.03.007","volume":"69","author":"J Ling","year":"2018","unstructured":"Ling, J., Zhang, K., Zhang, Y., Yang, D., Chen, Z.: A saliency prediction model on 360 degree images using color dictionary based sparse representation. Signal Process. Image Commun. 69, 60\u201368 (2018)","journal-title":"Signal Process. Image Commun."},{"key":"26_CR24","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1016\/j.image.2018.05.005","volume":"69","author":"R Monroy","year":"2017","unstructured":"Monroy, R., Lutz, S., Chalasani, T., Smolic, A.: Salnet360: saliency maps for omni-directional images with cnn. Signal Process. Image Commun. 69, 26\u201334 (2017)","journal-title":"Signal Process. Image Commun."},{"key":"26_CR25","unstructured":"Pan, J., et al.: Salgan: Visual saliency prediction with generative adversarial networks. ArXiv abs\/1701.01081 (2017)"},{"key":"26_CR26","doi-asserted-by":"crossref","unstructured":"Rai, Y., Callet, P.L., Guillotel, P.: Which saliency weighting for omni directional image quality assessment? 2017 Ninth International Conference on Quality of Multimedia Experience (QoMEX), pp.\u00a01\u20136 (2017)","DOI":"10.1109\/QoMEX.2017.7965659"},{"key":"26_CR27","doi-asserted-by":"crossref","unstructured":"Rai, Y., Guti\u00e9rrez, J., Le\u00a0Callet, P.: A dataset of head and eye movements for 360 degree images. In: Proceedings of the 8th ACM on Multimedia Systems Conference, p. 205\u2013210. 
MMSys\u201917 (2017)","DOI":"10.1145\/3083187.3083218"},{"key":"26_CR28","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A.G., Zhu, M., Zhmoginov, A., Chen, L.C.: Mobilenetv2: inverted residuals and linear bottlenecks. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520 (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"26_CR29","doi-asserted-by":"crossref","unstructured":"Sch\u00f6lkopf, B., Platt, J., Hofmann, T.: Graph-Based Visual Saliency, pp. 545\u2013552 (2007)","DOI":"10.7551\/mitpress\/7503.003.0073"},{"key":"26_CR30","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. CoRR abs\/1409.1556 (2014)"},{"key":"26_CR31","doi-asserted-by":"publisher","first-page":"9236","DOI":"10.1109\/TMM.2023.3249481","volume":"25","author":"Y Song","year":"2023","unstructured":"Song, Y., et al.: Rinet: relative importance-aware network for fixation prediction. IEEE Trans. Multimedia 25, 9236\u20139277 (2023)","journal-title":"IEEE Trans. Multimedia"},{"key":"26_CR32","doi-asserted-by":"crossref","unstructured":"Tan, M., Chen, B., Pang, R., Vasudevan, V., Le, Q.V.: Mnasnet: Platform-aware neural architecture search for mobile. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2815\u20132823 (2018)","DOI":"10.1109\/CVPR.2019.00293"},{"key":"26_CR33","unstructured":"Worrall, D., Welling, M.: Deep scale-spaces: Equivariance over scale. In: Advances in Neural Information Processing Systems. vol.\u00a032 (2019)"},{"key":"26_CR34","doi-asserted-by":"crossref","unstructured":"Wu, B., Wan, A., Yue, X., et\u00a0al., P.H.J.: Shift: A zero flop, zero parameter alternative to spatial convolutions. 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
9127\u20139135 (2017)","DOI":"10.1109\/CVPR.2018.00951"},{"key":"26_CR35","doi-asserted-by":"publisher","first-page":"2087","DOI":"10.1109\/TIP.2021.3050861","volume":"30","author":"M Xu","year":"2019","unstructured":"Xu, M., Yang, L., Tao, X., Duan, Y., Wang, Z.: Saliency prediction on omnidirectional image with generative adversarial imitation learning. IEEE Trans. Image Process. 30, 2087\u20132102 (2019)","journal-title":"IEEE Trans. Image Process."},{"key":"26_CR36","doi-asserted-by":"publisher","first-page":"889","DOI":"10.1109\/TPAMI.2015.2473844","volume":"38","author":"J Zhang","year":"2016","unstructured":"Zhang, J., Sclaroff, S.: Exploiting surroundedness for saliency detection: a Boolean map approach. IEEE Trans. Pattern Anal. Mach. Intell. 38, 889\u2013902 (2016)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-78122-3_26","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T08:13:44Z","timestamp":1733300024000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-78122-3_26"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,5]]},"ISBN":["9783031781216","9783031781223"],"references-count":36,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-78122-3_26","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,12,5]]},"assertion":[{"value":"5 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference 
Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kolkata","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icpr2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icpr2024.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}