{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T16:31:55Z","timestamp":1775665915565,"version":"3.50.1"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2023,10]]},"DOI":"10.1109\/tpami.2023.3282019","type":"journal-article","created":{"date-parts":[[2023,6,9]],"date-time":"2023-06-09T17:24:39Z","timestamp":1686331479000},"page":"12714-12720","source":"Crossref","is-referenced-by-count":12,"title":["What Makes for Hierarchical Vision Transformer?"],"prefix":"10.1109","volume":"45","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7303-8550","authenticated-orcid":false,"given":"Yuxin","family":"Fang","sequence":"first","affiliation":[{"name":"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6732-7823","authenticated-orcid":false,"given":"Xinggang","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8891-5488","authenticated-orcid":false,"given":"Rui","family":"Wu","sequence":"additional","affiliation":[{"name":"Horizon Robotics, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4582-7488","authenticated-orcid":false,"given":"Wenyu","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00069"},{"key":"ref35","article-title":"Multi-head or single-head? 
An empirical comparison for transformer training","author":"liu","year":"2021"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00813"},{"key":"ref34","first-page":"6575","article-title":"VOLO: Vision outlooker for visual recognition","volume":"45","author":"yuan","year":"2023","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.479"},{"key":"ref37","first-page":"9204","article-title":"Pay attention to MLPs","author":"liu","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-005-6642-x"},{"key":"ref36","article-title":"Are sixteen heads really better than one?","author":"michel","year":"2019","journal-title":"Proc 33rd Int Conf Neural Inf Process Syst"},{"key":"ref31","article-title":"MLP-mixer: An all-MLP architecture for vision","author":"tolstikhin","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref30","article-title":"Deep neural networks for large vocabulary handwritten text recognition","author":"bluche","year":"2015"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref33","first-page":"5436","article-title":"Beyond self-attention: External attention using two linear layers for visual tasks","volume":"45","author":"guo","year":"2023","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"ref10","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2015","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref32","doi-asserted-by":"crossref","first-page":"5314","DOI":"10.1109\/TPAMI.2022.3206148","article-title":"ResMLP: Feedforward networks for image classification with data-efficient training","volume":"45","author":"touvron","year":"2023","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01270"},{"key":"ref17","first-page":"6000","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc 31st Int Conf Neural Inf Process Syst"},{"key":"ref39","article-title":"PyTorch image models","author":"wightman","year":"2019"},{"key":"ref16","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","author":"touvron","year":"2021","journal-title":"Proc 38th Int Conf Mach Learn"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"ref18","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"2021","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref24","article-title":"On the connection between local attention and dynamic depth-wise convolution","author":"han","year":"2022","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref46","article-title":"Gaussian error linear units (GELUs)","author":"hendrycks","year":"2016"},{"key":"ref23","article-title":"Aggregating nested transformers","author":"zhang","year":"2021"},{"key":"ref45","article-title":"MobileNets: Efficient convolutional neural networks for mobile vision 
applications","author":"howard","year":"2017"},{"key":"ref26","first-page":"3965","article-title":"CoAtNet: Marrying convolution and attention for all data sizes","author":"dai","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00983"},{"key":"ref20","article-title":"Transformer in transformer","author":"han","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00367"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01172"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.5244\/C.30.87"},{"key":"ref21","article-title":"Patches are all you need?","author":"trockman","year":"2022"},{"key":"ref43","article-title":"MMDetection: Open MMLab detection toolbox and benchmark","author":"chen","year":"2019"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2003.1227801"},{"key":"ref27","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-642-35289-8_31","article-title":"Deep big multilayer perceptrons for digit recognition","author":"cire?an","year":"2012","journal-title":"Neural Networks Tricks of the Trade"},{"key":"ref29","article-title":"Extraction de s&#x00E9;quences num&#x00E9;riques dans des documents manuscrits quelconques","author":"chatelain","year":"2006"},{"key":"ref8","article-title":"Layer normalization","author":"ba","year":"2016"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref9","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc 25th Int Conf Neural Inf Process Syst"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01175"},{"key":"ref5","article-title":"Shuffle transformer: Rethinking spatial shuffle for vision transformer","author":"huang","year":"2021"},{"key":"ref40","article-title":"Global filter networks for image classification","author":"rao","year":"2021","journal-title":"Proc Int Conf Neural Inf Process"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10241246\/10147250.pdf?arnumber=10147250","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T19:32:18Z","timestamp":1743795138000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10147250\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10]]},"references-count":46,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2023.3282019","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10]]}}}