{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:46:17Z","timestamp":1777657577002,"version":"3.51.4"},"reference-count":76,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,10,1]]},"DOI":"10.1109\/iccv51070.2023.01825","type":"proceedings-article","created":{"date-parts":[[2024,1,15]],"date-time":"2024-01-15T15:55:59Z","timestamp":1705334159000},"page":"19879-19890","source":"Crossref","is-referenced-by-count":39,"title":["Implicit Temporal Modeling with Learnable Alignment for Video Recognition"],"prefix":"10.1109","author":[{"given":"Shuyuan","family":"Tu","sequence":"first","affiliation":[{"name":"Fudan University,Shanghai Key Lab of Intell. Info. Processing, School of CS"}]},{"given":"Qi","family":"Dai","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia"}]},{"given":"Zuxuan","family":"Wu","sequence":"additional","affiliation":[{"name":"Fudan University,Shanghai Key Lab of Intell. Info. Processing, School of CS"}]},{"given":"Zhi-Qi","family":"Cheng","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Han","family":"Hu","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia"}]},{"given":"Yu-Gang","family":"Jiang","sequence":"additional","affiliation":[{"name":"Fudan University,Shanghai Key Lab of Intell. Info. Processing, School of CS"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Vatt: Transformers for multimodal self-supervised learning from raw video, audio and text","author":"Akbari","year":"2021","journal-title":"NeurIPS"},{"key":"ref2","article-title":"Wasserstein gan","author":"Arjovsky","year":"2017"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"ref4","article-title":"Layer normalization","author":"Ba","year":"2016"},{"key":"ref5","article-title":"Is space-time attention all you need for video understanding?","volume-title":"ICML","author":"Bertasius"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.444"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2016.2647386"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/BF01584237"},{"key":"ref9","article-title":"Video super-resolution transformer","author":"Cao","year":"2021"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00588"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00124"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref14","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"ICLR","author":"Dosovitskiy"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2023\/90"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/2964284.2964326"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547943"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-023-01891-x"},{"key":"ref22","article-title":"On the connection between local attention and dynamic depth-wise convolution","volume-title":"ICLR","author":"Han"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2017.373"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.620"},{"key":"ref25","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"ICML","author":"Jia"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00963"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_7"},{"key":"ref28","article-title":"The kinetics human action video dataset","volume-title":"CVPR","author":"Kay"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58517-4_21"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01231-1_19"},{"key":"ref31","article-title":"Uniformer: Unified transformer for efficient spatiotemporal representation learning","volume-title":"ICLR","author":"Li"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i2.20029"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00476"},{"key":"ref34","article-title":"Fdan: Flow-guided deformable alignment network for video super-resolution","author":"Lin","year":"2021"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_23"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00113"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19772-7_1"},{"key":"ref39","article-title":"St-adapter: Parameter-efficient image-to-video transfer learning for action recognition","author":"Pan","year":"2022","journal-title":"NeurIPS"},{"key":"ref40","article-title":"Keeping your eye on the ball: Trajectory attention in video transformers","author":"Patrick","year":"2021","journal-title":"NeurIPS"},{"key":"ref41","article-title":"Learning transferable visual models from natural language supervision","volume-title":"ICML","author":"Radford"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2005.335"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01755"},{"key":"ref44","article-title":"Tokenlearner: Adaptive space-time tokenization for videos","author":"Ryoo","year":"2021","journal-title":"NeurIPS"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00792"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00109"},{"key":"ref47","article-title":"Rethinking alignment in video superresolution transformers","author":"Shi","year":"2022","journal-title":"NeurIPS"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.119"},{"key":"ref49","article-title":"Two-stream convolutional networks for action recognition in videos","author":"Simonyan","year":"2014","journal-title":"NeurIPS"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02176"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00342"},{"key":"ref52","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"ICML","author":"Touvron"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00675"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"ref56","article-title":"Actionclip: A new paradigm for video action recognition","volume-title":"ECCV","author":"Wang"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00142"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2019.00247"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i4.16400"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_19"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01804"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.544"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00333"},{"key":"ref64","article-title":"Aim: Adapting image models for efficient video action recognition","volume-title":"ICLR","author":"Yang"},{"key":"ref65","article-title":"Florence: A new foundation model for computer vision","author":"Yuan","year":"2021"},{"key":"ref66","article-title":"Co-training transformer with videos and images improves action recognition","author":"Zhang","year":"2021"},{"key":"ref67","article-title":"Tip-adapter: Training-free clip-adapter for better visionlanguage modeling","volume-title":"ECCV","author":"Zhang"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00836"},{"key":"ref69","article-title":"Hivit: A simpler and more efficient design of hierarchical vision transformer","volume-title":"ICLR","author":"Zhang"},{"key":"ref70","article-title":"Alignment-guided temporal attention for video action recognition","author":"Zhao","year":"2022","journal-title":"NeurIPS"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01246-5_49"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00058"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.2990070"},{"key":"ref74","article-title":"Open-VCLIP: Transforming CLIP to an Open-vocabulary Video Model via Interpolated Weight Optimization","volume-title":"ICML","author":"Weng"},{"key":"ref75","article-title":"Omnivl: One foundation model for image-language and video-language tasks","author":"Wang","year":"2022","journal-title":"NeurIPS"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.3029425"}],"event":{"name":"2023 IEEE\/CVF International Conference on Computer Vision (ICCV)","location":"Paris, France","start":{"date-parts":[[2023,10,1]]},"end":{"date-parts":[[2023,10,6]]}},"container-title":["2023 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10376473\/10376477\/10378436.pdf?arnumber=10378436","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,17]],"date-time":"2024-01-17T20:50:51Z","timestamp":1705524651000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10378436\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,1]]},"references-count":76,"URL":"https:\/\/doi.org\/10.1109\/iccv51070.2023.01825","relation":{},"subject":[],"published":{"date-parts":[[2023,10,1]]}}}