{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T15:13:48Z","timestamp":1778080428283,"version":"3.51.4"},"reference-count":62,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,10,1]]},"DOI":"10.1109\/iccv51070.2023.01413","type":"proceedings-article","created":{"date-parts":[[2024,1,15]],"date-time":"2024-01-15T15:55:59Z","timestamp":1705334159000},"page":"15359-15370","source":"Crossref","is-referenced-by-count":50,"title":["HiTeA: Hierarchical Temporal-Aware Video-Language Pre-training"],"prefix":"10.1109","author":[{"given":"Qinghao","family":"Ye","sequence":"first","affiliation":[{"name":"Alibaba Group,DAMO Academy"}]},{"given":"Guohai","family":"Xu","sequence":"additional","affiliation":[{"name":"Alibaba Group,DAMO Academy"}]},{"given":"Ming","family":"Yan","sequence":"additional","affiliation":[{"name":"Alibaba Group,DAMO Academy"}]},{"given":"Haiyang","family":"Xu","sequence":"additional","affiliation":[{"name":"Alibaba Group,DAMO Academy"}]},{"given":"Qi","family":"Qian","sequence":"additional","affiliation":[{"name":"Alibaba Group,DAMO Academy"}]},{"given":"Ji","family":"Zhang","sequence":"additional","affiliation":[{"name":"Alibaba Group,DAMO Academy"}]},{"given":"Fei","family":"Huang","sequence":"additional","affiliation":[{"name":"Alibaba Group,DAMO Academy"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.618"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref3","first-page":"4","article-title":"Is space-time attention all you need for video understanding?","volume-title":"ICML","volume":"2","author":"Bertasius"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00293"},{"key":"ref5","first-page":"190","article-title":"Collecting highly parallel data for paraphrase evaluation","volume-title":"Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies","author":"Chen"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01549"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref9","author":"Devlin","year":"2018","journal-title":"Bert: Pre-training of deep bidirectional transformers for language understanding"},{"key":"ref10","author":"Fu","year":"2021","journal-title":"Violet: End-to-end video-language transformers with masked visual-token mod-eling"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01569"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref13","first-page":"21271","article-title":"Bootstrap your own latent-a new approach to self-supervised learning","volume":"33","author":"Grill","year":"2020","journal-title":"Advances in neural information processing 
systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01427"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.149"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6767"},{"key":"ref17","author":"Kingma","year":"2014","journal-title":"Adam: A method for stochastic optimization"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.83"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-021-01514-3"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.29"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.488"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00490"},{"key":"ref24","article-title":"Blip: Bootstrapping language-image pre-training for uni fied vision-language understanding and generation","volume-title":"ICML","author":"Li"},{"key":"ref25","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume":"34","author":"Li","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.161"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02214"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00476"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01742"},{"key":"ref30","author":"Luo","year":"2020","journal-title":"Univl: A unified video and language pre-training model for multimodal understanding and generation"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.07.028"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547910"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.778"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00990"},{"key":"ref35","article-title":"Pytorch: An imperative style, high-performance deep learning library","volume":"32","author":"Paszke","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref36","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International Conference on Machine Learning","author":"Radford"},{"key":"ref37","author":"Ridnik","year":"2021","journal-title":"Imagenet-21k pretraining for the masses"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298940"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01570"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01743"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3479207"},{"key":"ref44","author":"Torabi","year":"2016","journal-title":"Learning language-visual embedding for movie understanding with natural-language"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00638"},{"key":"ref47","author":"Wang","year":"2021","journal-title":"Simvlm: Simple visual language model pretraining with 
weak supervision"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00965"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_3"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123427"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.544"},{"key":"ref52","article-title":"mplug-2: A modularized multi-modal foundation model across text, image and video","volume-title":"Proceedings of International Conference of Machine Learning (ICML)","author":"Xu"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00171"},{"key":"ref55","author":"Yang","year":"2022","journal-title":"Zero-shot video question answering via frozen bidirectional language models"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00785"},{"key":"ref57","author":"Ye","year":"2023","journal-title":"mplug-owl: Modularization empowers large language models with multimodality"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_29"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019127"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01589"},{"key":"ref61","first-page":"23634","article-title":"Mer-lot: Multimodal neural script knowledge models","volume":"34","author":"Zellers","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00877"}],"event":{"name":"2023 IEEE\/CVF International Conference on Computer Vision (ICCV)","location":"Paris, France","start":{"date-parts":[[2023,10,1]]},"end":{"date-parts":[[2023,10,6]]}},"container-title":["2023 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10376473\/10376477\/10377884.pdf?arnumber=10377884","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,17]],"date-time":"2024-01-17T20:20:41Z","timestamp":1705522841000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10377884\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,1]]},"references-count":62,"URL":"https:\/\/doi.org\/10.1109\/iccv51070.2023.01413","relation":{},"subject":[],"published":{"date-parts":[[2023,10,1]]}}}