{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T15:12:02Z","timestamp":1778080322527,"version":"3.51.4"},"publisher-location":"Cham","reference-count":102,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726699","type":"print"},{"value":"9783031726705","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72670-5_21","type":"book-chapter","created":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T07:01:50Z","timestamp":1727593310000},"page":"368-387","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":13,"title":["Navigation Instruction Generation with\u00a0BEV Perception and\u00a0Large Language Models"],"prefix":"10.1007","author":[{"given":"Sheng","family":"Fan","sequence":"first","affiliation":[]},{"given":"Rui","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Wenguan","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Yang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,30]]},"reference":[{"key":"21_CR1","unstructured":"Agarwal, S., Parikh, D., Batra, D., Anderson, P., Lee, S.: Visual landmark selection for generating grounded and interpretable navigation instructions. 
In: CVPR Workshop (2019)"},{"key":"21_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"363","DOI":"10.1007\/3-540-63623-4_61","volume-title":"Spatial Information Theory A Theoretical Basis for GIS","author":"GL Allen","year":"1997","unstructured":"Allen, G.L.: From knowledge to words to wayfinding: Issues in the production and comprehension of route directions. In: Hirtle, S.C., Frank, A.U. (eds.) COSIT 1997. LNCS, vol. 1329, pp. 363\u2013372. Springer, Heidelberg (1997). https:\/\/doi.org\/10.1007\/3-540-63623-4_61"},{"key":"21_CR3","unstructured":"An, D., Qi, Y., Li, Y., Huang, Y., Wang, L., Tan, T., Shao, J.: Bevbert: multimodal map pre-training for language-guided navigation. In: ICCV (2023)"},{"key":"21_CR4","doi-asserted-by":"crossref","unstructured":"An, D., et al.: Etpnav: evolving topological planning for vision-language navigation in continuous environments. IEEE Trans. PAMI (2024)","DOI":"10.1109\/TPAMI.2024.3386695"},{"key":"21_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"382","DOI":"10.1007\/978-3-319-46454-1_24","volume-title":"Computer Vision \u2013 ECCV 2016","author":"P Anderson","year":"2016","unstructured":"Anderson, P., Fernando, B., Johnson, M., Gould, S.: SPICE: semantic propositional image caption evaluation. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9909, pp. 382\u2013398. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46454-1_24"},{"key":"21_CR6","doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Vision-and-language navigation: interpreting visually-grounded navigation instructions in real environments. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00387"},{"key":"21_CR7","unstructured":"Banerjee, S., Lavie, A.: Meteor: an automatic metric for mt evaluation with improved correlation with human judgments. 
In: ACL Workshop (2005)"},{"key":"21_CR8","unstructured":"Baruch, G., et\u00a0al.: Arkitscenes: a diverse real-world dataset for 3d indoor scene understanding using mobile rgb-d data. In: NeurIPS (2021)"},{"key":"21_CR9","unstructured":"Brohan, A., et\u00a0al.: Rt-2: vision-language-action models transfer web knowledge to robotic control. In: CoRL (2023)"},{"key":"21_CR10","doi-asserted-by":"crossref","unstructured":"Cartillier, V., Ren, Z., Jain, N., Lee, S., Essa, I., Batra, D.: Semantic mapnet: Building allocentric semantic maps and representations from egocentric views. In: AAAI (2021)","DOI":"10.1609\/aaai.v35i2.16180"},{"key":"21_CR11","doi-asserted-by":"crossref","unstructured":"Chang, A., et al.: Matterport3D: learning from RGB-D data in indoor environments. In: 3DV (2017)","DOI":"10.1109\/3DV.2017.00081"},{"key":"21_CR12","doi-asserted-by":"crossref","unstructured":"Chen, J., Wang, W., Liu, S., Li, H., Yang, Y.: Omnidirectional information gathering for knowledge transfer-based audio-visual navigation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01009"},{"key":"21_CR13","unstructured":"Chen, P., Ji, D., Lin, K., Zeng, R., Li, T., Tan, M., Gan, C.: Weakly-supervised multi-granularity map learning for vision-and-language navigation. In: NIPS (2022)"},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Chen, S., Guhur, P.L., Schmid, C., Laptev, I.: History aware multimodal transformer for vision-and-language navigation. In: NeurIPS (2021)","DOI":"10.1109\/ICCV48922.2021.00166"},{"key":"21_CR15","doi-asserted-by":"publisher","unstructured":"Chen, S., Guhur, P.L., Tapaswi, M., Schmid, C., Laptev, I.: Learning from unlabeled 3d environments for vision-and-language navigation. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13699. Springer, Cham(2022). 
https:\/\/doi.org\/10.1007\/978-3-031-19842-7_37","DOI":"10.1007\/978-3-031-19842-7_37"},{"key":"21_CR16","doi-asserted-by":"crossref","unstructured":"Chen, S., Guhur, P.L., Tapaswi, M., Schmid, C., Laptev, I.: Think global, act local: Dual-scale graph transformer for vision-and-language navigation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01604"},{"key":"21_CR17","doi-asserted-by":"crossref","unstructured":"Cui, Y., Xie, L., Zhang, Y., Zhang, M., Yan, Y., Yin, E.: Grounded entity-landmark adaptive pre-training for vision-and-language navigation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01106"},{"key":"21_CR18","unstructured":"Curry, A.C., Gkatzia, D., Rieser, V.: Generating and evaluating landmark-based navigation instructions in virtual environments. In: ENLG (2015)"},{"key":"21_CR19","doi-asserted-by":"crossref","unstructured":"Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nie\u00dfner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.261"},{"key":"21_CR20","unstructured":"Dai, W., et al.: Instructblip: towards general-purpose vision-language models with instruction tuning. In: NeurIPS (2023)"},{"issue":"3","key":"21_CR21","first-page":"23","volume":"36","author":"R Dale","year":"2004","unstructured":"Dale, R., Geldof, S., Prost, J.: Using natural language generation in automatic route. J. Res. Pract. Inf. Technol. 36(3), 23 (2004)","journal-title":"J. Res. Pract. Inf. Technol."},{"key":"21_CR22","unstructured":"Driess, D., et\u00a0al.: Palm-e: an embodied multimodal language model. In: ICML (2023)"},{"key":"21_CR23","doi-asserted-by":"crossref","unstructured":"Durrant-Whyte, H., Bailey, T.: Simultaneous localization and mapping: part i. IEEE Robot. Automation Mag. 
13(2) (2006)","DOI":"10.1109\/MRA.2006.1638022"},{"issue":"1","key":"21_CR24","doi-asserted-by":"publisher","first-page":"83","DOI":"10.1177\/0013916581131005","volume":"13","author":"GW Evans","year":"1981","unstructured":"Evans, G.W., Marrero, D.G., Butler, P.A.: Environmental learning and cognitive mapping. Environ. Behav. 13(1), 83\u2013104 (1981)","journal-title":"Environ. Behav."},{"key":"21_CR25","doi-asserted-by":"crossref","unstructured":"Fernandes, P., et\u00a0al.: Bridging the gap: A survey on integrating (human) feedback for natural language generation. arXiv preprint arXiv:2305.00955 (2023)","DOI":"10.1162\/tacl_a_00626"},{"key":"21_CR26","doi-asserted-by":"crossref","unstructured":"Fried, D., Andreas, J., Klein, D.: Unified pragmatic models for generating and following instructions. In: NAACL (2018)","DOI":"10.18653\/v1\/N18-1177"},{"key":"21_CR27","unstructured":"Fried, D., et al.: Speaker-follower models for vision-and-language navigation. In: NeurIPS (2018)"},{"key":"21_CR28","unstructured":"Gao, P., et al.: Llama-adapter v2: parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010 (2023)"},{"key":"21_CR29","doi-asserted-by":"crossref","unstructured":"Goeddel, R., Olson, E.: Dart: a particle-based method for generating easy-to-follow directions. In: International Conference on Intelligent Robots and Systems (2012)","DOI":"10.1109\/IROS.2012.6385471"},{"key":"21_CR30","unstructured":"Grauman, K., et\u00a0al.: Ego4d: around the world in 3,000 hours of egocentric video. In: CVPR (2022)"},{"key":"21_CR31","doi-asserted-by":"crossref","unstructured":"Hao, W., Li, C., Li, X., Carin, L., Gao, J.: Towards learning a generic agent for vision-and-language navigation via pre-training. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01315"},{"key":"21_CR32","doi-asserted-by":"crossref","unstructured":"Henriques, J.F., Vedaldi, A.: Mapnet: an allocentric spatial memory for mapping environments. 
In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00884"},{"key":"21_CR33","doi-asserted-by":"crossref","unstructured":"Hong, Y., et al.: Learning navigational visual representations with semantic map supervision. In: CVPR (2023)","DOI":"10.1109\/ICCV51070.2023.00284"},{"key":"21_CR34","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for nlp. In: ICML (2019)"},{"key":"21_CR35","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models. In: ICLR (2022)"},{"key":"21_CR36","unstructured":"Huang, W., Wang, C., Zhang, R., Li, Y., Wu, J., Fei-Fei, L.: Voxposer: composable 3d value maps for robotic manipulation with language models. In: CoRL (2023)"},{"key":"21_CR37","doi-asserted-by":"publisher","unstructured":"Huang, Z., Shangguan, Z., Zhang, J., Bar, G., Boyd, M., Ohn-Bar, E.: Assister: Assistive navigation via conditional instruction generation. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds) ECCV 2022. LNCS, vol. 13696 (2022). https:\/\/doi.org\/10.1007\/978-3-031-20059-5_16","DOI":"10.1007\/978-3-031-20059-5_16"},{"key":"21_CR38","doi-asserted-by":"publisher","unstructured":"Jia, M., Tang, L., Chen, B.C., Cardie, C., Belongie, S., Hariharan, B., Lim, S.N.: Visual prompt tuning. In: ECCV (2022). https:\/\/doi.org\/10.1007\/978-3-031-19827-4_41","DOI":"10.1007\/978-3-031-19827-4_41"},{"issue":"2","key":"21_CR39","first-page":"129","volume":"2","author":"B Kuipers","year":"1978","unstructured":"Kuipers, B.: Modeling spatial knowledge. Cogn. Sci. 2(2), 129\u2013153 (1978)","journal-title":"Cogn. Sci."},{"key":"21_CR40","doi-asserted-by":"crossref","unstructured":"Kwon, O., Park, J., Oh, S.: Renderable neural radiance map for visual navigation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00878"},{"key":"21_CR41","unstructured":"Li, C., et al.: Llava-med: training a large language-and-vision assistant for biomedicine in one day. 
In: NeurIPS (2024)"},{"key":"21_CR42","unstructured":"Li, H., et\u00a0al.: Delving into the devils of bird\u2019s-eye-view perception: a review, evaluation and recipe. IEEE Trans. Pattern Anal. Mach. Intell. (2023)"},{"key":"21_CR43","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In: ICML (2023)"},{"key":"21_CR44","unstructured":"Li, K., et al.: Videochat: chat-centric video understanding. arXiv preprint arXiv:2305.06355 (2023)"},{"key":"21_CR45","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: Bevdepth: acquisition of reliable depth for multi-view 3d object detection. In: AAAI (2023)","DOI":"10.1609\/aaai.v37i2.25233"},{"key":"21_CR46","doi-asserted-by":"crossref","unstructured":"Li, Z., Wang, W., Li, H., Xie, E., Sima, C., Lu, T., Qiao, Y., Dai, J.: Bevformer: Learning bird\u2019s-eye-view representation from multi-camera images via spatiotemporal transformers. In: ECCV (2022). doi:","DOI":"10.1007\/978-3-031-20077-9_1"},{"key":"21_CR47","doi-asserted-by":"crossref","unstructured":"Li, Z., et al.: FB-BEV: BEV representation from forward-backward view transformations. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00637"},{"key":"21_CR48","unstructured":"Lian, D., Zhou, D., Feng, J., Wang, X.: Scaling & shifting your features: a new baseline for efficient model tuning. In: NeurIPS (2022)"},{"key":"21_CR49","unstructured":"Lin, C.Y.: Rouge: A package for automatic evaluation of summaries. Text summarization branches out, pp. 74\u201381 (2004)"},{"key":"21_CR50","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. In: NeurIPS (2024)"},{"key":"21_CR51","doi-asserted-by":"crossref","unstructured":"Liu, R., Wang, W., Yang, Y.: Volumetric environment representation for vision-language navigation. 
In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01544"},{"key":"21_CR52","doi-asserted-by":"crossref","unstructured":"Liu, R., Wang, X., Wang, W., Yang, Y.: Bird\u2019s-eye-view scene graph for vision-language navigation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01007"},{"key":"21_CR53","doi-asserted-by":"crossref","unstructured":"Look, G., Kottahachchi, B., Laddaga, R., Shrobe, H.: A location representation for generating descriptive walking directions. In: IUI (2005)","DOI":"10.1145\/1040830.1040862"},{"key":"21_CR54","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)"},{"key":"21_CR55","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"65","DOI":"10.1007\/3-540-48384-5_5","volume-title":"Spatial Information Theory. Cognitive and Computational Foundations of Geographic Information Science","author":"KL Lovelace","year":"1999","unstructured":"Lovelace, K.L., Hegarty, M., Montello, D.R.: Elements of good route directions in familiar and unfamiliar environments. In: Freksa, C., Mark, D.M. (eds.) COSIT 1999. LNCS, vol. 1661, pp. 65\u201382. Springer, Heidelberg (1999). https:\/\/doi.org\/10.1007\/3-540-48384-5_5"},{"key":"21_CR56","unstructured":"Luo, G., Zhou, Y., Ren, T., Chen, S., Sun, X., Ji, R.: Cheap and quick: efficient vision-language instruction tuning for large language models. In: NeurIPS (2023)"},{"key":"21_CR57","unstructured":"Lynch, K.: The image of the city. MIT press (1964)"},{"key":"21_CR58","unstructured":"Ma, Y., et al.: Vision-centric bev perception: A survey. arXiv preprint arXiv:2208.02797 (2022)"},{"key":"21_CR59","doi-asserted-by":"crossref","unstructured":"Maaz, M., Rasheed, H., Khan, S., Khan, F.S.: Video-chatgpt: towards detailed video understanding via large vision and language models. 
arXiv preprint arXiv:2306.05424 (2023)","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"21_CR60","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/978-3-030-58452-8_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"B Mildenhall","year":"2020","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 405\u2013421. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_24"},{"key":"21_CR61","unstructured":"Moudgil, A., Majumdar, A., Agrawal, H., Lee, S., Batra, D.: Soat: A scene-and object-aware transformer for vision-and-language navigation. In: NeurIPS (2021)"},{"key":"21_CR62","unstructured":"OpenAI: Gpt-4 technical report (2023)"},{"key":"21_CR63","doi-asserted-by":"crossref","unstructured":"Pan, L., Saxon, M., Xu, W., Nathani, D., Wang, X., Wang, W.Y.: Automatically correcting large language models: surveying the landscape of diverse self-correction strategies. arXiv preprint arXiv:2308.03188 (2023)","DOI":"10.1162\/tacl_a_00660"},{"key":"21_CR64","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: Bleu: a method for automatic evaluation of machine translation. In: ACL (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"21_CR65","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"194","DOI":"10.1007\/978-3-030-58568-6_12","volume-title":"Computer Vision \u2013 ECCV 2020","author":"J Philion","year":"2020","unstructured":"Philion, J., Fidler, S.: Lift, splat, shoot: encoding images from arbitrary camera rigs by implicitly unprojecting to 3D. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12359, pp. 194\u2013210. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58568-6_12"},{"key":"21_CR66","doi-asserted-by":"crossref","unstructured":"Pi, R., et al.: Detgpt: detect what you need via reasoning. In: EMNLP (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.876"},{"key":"21_CR67","doi-asserted-by":"crossref","unstructured":"Qi, Y., Wu, Q., Anderson, P., Wang, X., Wang, W.Y., Shen, C., van\u00a0den Hengel, A.: Reverie: Remote embodied visual referring expression in real indoor environments. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01000"},{"key":"21_CR68","doi-asserted-by":"crossref","unstructured":"Reading, C., Harakeh, A., Chae, J., Waslander, S.L.: Categorical depth distributionnetwork for monocular 3d object detection. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00845"},{"key":"21_CR69","unstructured":"Richter, K.F., Duckham, M.: Simplest instructions: Finding easy-to-describe routes for navigation. In: Geographic Information Science: 5th International Conference, GIScience 2008, Park City, UT, USA, September 23-26, 2008. Proceedings 5 (2008)"},{"key":"21_CR70","doi-asserted-by":"crossref","unstructured":"Schuster, S., Krishna, R., Chang, A., Fei-Fei, L., Manning, C.D.: Generating semantically precise scene graphs from textual descriptions for improved image retrieval. In: Proceedings of the Fourth Workshop on Vision and Language (2015)","DOI":"10.18653\/v1\/W15-2812"},{"key":"21_CR71","unstructured":"Su, Y., Lan, T., Li, H., Xu, J., Wang, Y., Cai, D.: Pandagpt: One model to instruction-follow them all. In: Taming Large Language Models (TLLM) (2023)"},{"key":"21_CR72","doi-asserted-by":"crossref","unstructured":"Sung, Y.L., Cho, J., Bansal, M.: Vl-adapter: Parameter-efficient transfer learning for vision-and-language tasks. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00516"},{"key":"21_CR73","doi-asserted-by":"crossref","unstructured":"Tan, H., Yu, L., Bansal, M.: Learning to navigate unseen environments: Back translation with environmental dropout. 
In: NAACL (2019)","DOI":"10.18653\/v1\/N19-1268"},{"key":"21_CR74","unstructured":"Touvron, H., et\u00a0al.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"issue":"6","key":"21_CR75","doi-asserted-by":"publisher","first-page":"667","DOI":"10.1177\/0013916588206001","volume":"20","author":"EJ Vanetti","year":"1988","unstructured":"Vanetti, E.J., Allen, G.L.: Communicating environmental knowledge: the impact of verbal and spatial abilities on the production and comprehension of route directions. Environ. Behav. 20(6), 667\u2013682 (1988)","journal-title":"Environ. Behav."},{"key":"21_CR76","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)"},{"key":"21_CR77","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence\u00a0Zitnick, C., Parikh, D.: Cider: consensus-based image description evaluation. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"issue":"5","key":"21_CR78","doi-asserted-by":"publisher","first-page":"910","DOI":"10.3758\/BF03193465","volume":"35","author":"D Waller","year":"2007","unstructured":"Waller, D., Lippa, Y.: Landmarks as beacons and associative cues: their role in route learning. Memory Cogn. 35(5), 910\u2013924 (2007)","journal-title":"Memory Cogn."},{"key":"21_CR79","unstructured":"Wang, H., Liang, W., Gool, L.V., Wang, W.: Towards versatile embodied navigation. In: NeurIPS (2022)"},{"key":"21_CR80","doi-asserted-by":"crossref","unstructured":"Wang, H., Liang, W., Shen, J., Van\u00a0Gool, L., Wang, W.: Counterfactual cycle-consistent learning for instruction following and generation in vision-language navigation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01503"},{"key":"21_CR81","doi-asserted-by":"crossref","unstructured":"Wang, H., Liang, W., Van\u00a0Gool, L., Wang, W.: Dreamwalker: mental planning for continuous vision-language navigation. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00998"},{"issue":"3","key":"21_CR82","first-page":"607","volume":"131","author":"H Wang","year":"2023","unstructured":"Wang, H., Wang, W., Liang, W., Hoi, S.C., Shen, J., Gool, L.V.: Active perception for visual-language navigation. IJCV 131(3), 607\u2013625 (2023)","journal-title":"Active perception for visual-language navigation. IJCV"},{"key":"21_CR83","doi-asserted-by":"crossref","unstructured":"Wang, H., Wang, W., Liang, W., Xiong, C., Shen, J.: Structured scene memory for vision-language navigation. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00835"},{"key":"21_CR84","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"307","DOI":"10.1007\/978-3-030-58542-6_19","volume-title":"Computer Vision \u2013 ECCV 2020","author":"H Wang","year":"2020","unstructured":"Wang, H., Wang, W., Shu, T., Liang, W., Shen, J.: Active visual information gathering for vision-language navigation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12367, pp. 307\u2013322. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58542-6_19"},{"key":"21_CR85","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2023.107487","volume":"128","author":"L Wang","year":"2024","unstructured":"Wang, L., et al.: Pasts: Progress-aware spatio-temporal transformer speaker for vision-and-language navigation. Eng. Appl. Artif. Intell. 128, 107487 (2024)","journal-title":"Eng. Appl. Artif. Intell."},{"key":"21_CR86","doi-asserted-by":"crossref","unstructured":"Wang, S., et al.: Less is more: Generating grounded navigation instructions from landmarks. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01499"},{"key":"21_CR87","unstructured":"Wang, W., et\u00a0al.: Visionllm: large language model is also an open-ended decoder for vision-centric tasks. 
In: NeurIPS (2024)"},{"key":"21_CR88","doi-asserted-by":"crossref","unstructured":"Wang, X., Wang, W., Shao, J., Yang, Y.: Lana: a language-capable navigator for instruction following and generation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01826"},{"key":"21_CR89","doi-asserted-by":"crossref","unstructured":"Wang, X., Wang, W., Shao, J., Yang, Y.: Learning to follow and generate instructions for language-capable navigation. IEEE Trans. PAMI (2023)","DOI":"10.1109\/TPAMI.2023.3341828"},{"key":"21_CR90","unstructured":"Wang, Y., Guizilini, V., Zhang, T., Wang, Y., Zhao, H., Solomon, J.: DETR3D: 3d object detection from multi-view images via 3d-to-2d queries. In: Conference on Robot Learning (2021)"},{"key":"21_CR91","doi-asserted-by":"crossref","unstructured":"Wang, Z., Li, X., Yang, J., Liu, Y., Jiang, S.: Gridmm: grid memory map for vision-and-language navigation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01432"},{"issue":"2","key":"21_CR92","doi-asserted-by":"publisher","first-page":"192","DOI":"10.1177\/0013916586182003","volume":"18","author":"SL Ward","year":"1986","unstructured":"Ward, S.L., Newcombe, N., Overton, W.F.: Turn left at the church, or three miles north: a study of direction giving and sex differences. Environ. Behav. 18(2), 192\u2013213 (1986)","journal-title":"Environ. Behav."},{"issue":"8","key":"21_CR93","doi-asserted-by":"publisher","first-page":"1087","DOI":"10.1007\/s10514-023-10139-z","volume":"47","author":"J Wu","year":"2023","unstructured":"Wu, J.: Tidybot: personalized robot assistance with large language models. Auton. Robot. 47(8), 1087\u20131102 (2023)","journal-title":"Auton. Robot."},{"key":"21_CR94","doi-asserted-by":"crossref","unstructured":"Yang, J., et\u00a0al.: Octopus: Embodied vision-language programmer from environmental feedback. 
arXiv preprint arXiv:2310.08588 (2023)","DOI":"10.1007\/978-3-031-73232-4_2"},{"key":"21_CR95","unstructured":"Yang, Z., Chen, G., Li, X., Wang, W., Yang, Y.: Doraemongpt: toward understanding dynamic scenes with large language models (exemplified as a video agent). In: ICML (2024)"},{"key":"21_CR96","unstructured":"Yin, Z., et\u00a0al.: Lamm: language-assisted multi-modal instruction-tuning dataset, framework, and benchmark. In: NeurIPS (2024)"},{"key":"21_CR97","unstructured":"Zeng, H., Wang, X., Wang, W., Yang, Y.: Kefa: a knowledge enhanced and fine-grained aligned speaker for navigation instruction generation. arXiv preprint arXiv:2307.13368 (2023)"},{"key":"21_CR98","doi-asserted-by":"crossref","unstructured":"Zhang, H., Li, X., Bing, L.: Video-llama: an instruction-tuned audio-visual language model for video understanding. In: EMNLP (2023)","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"21_CR99","unstructured":"Zhang, R., et al.: Llama-adapter: efficient fine-tuning of language models with zero-init attention. In: ICLR (2024)"},{"key":"21_CR100","doi-asserted-by":"crossref","unstructured":"Zheng, Z., Wang, W., Qi, S., Zhu, S.C.: Reasoning visual dialogs with structural and partial observations. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00683"},{"key":"21_CR101","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: Minigpt-4: enhancing vision-language understanding with advanced large language models. In: ICLR (2024)"},{"key":"21_CR102","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, X., Dai, J.: Deformable detr: deformable transformers for end-to-end object detection. 
In: ICLR (2020)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72670-5_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T07:23:25Z","timestamp":1727594605000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72670-5_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,30]]},"ISBN":["9783031726699","9783031726705"],"references-count":102,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72670-5_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,30]]},"assertion":[{"value":"30 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}