{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T15:35:33Z","timestamp":1778081733912,"version":"3.51.4"},"reference-count":60,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,10,1]]},"DOI":"10.1109\/iccv51070.2023.00110","type":"proceedings-article","created":{"date-parts":[[2024,1,15]],"date-time":"2024-01-15T20:55:59Z","timestamp":1705352159000},"page":"1130-1140","source":"Crossref","is-referenced-by-count":287,"title":["SegGPT: Towards Segmenting Everything In Context"],"prefix":"10.1109","author":[{"given":"Xinlong","family":"Wang","sequence":"first","affiliation":[{"name":"Beijing Academy of Artificial Intelligence"}]},{"given":"Xiaosong","family":"Zhang","sequence":"additional","affiliation":[{"name":"Beijing Academy of Artificial Intelligence"}]},{"given":"Yue","family":"Cao","sequence":"additional","affiliation":[{"name":"Beijing Academy of Artificial Intelligence"}]},{"given":"Wen","family":"Wang","sequence":"additional","affiliation":[{"name":"Zhejiang University"}]},{"given":"Chunhua","family":"Shen","sequence":"additional","affiliation":[{"name":"Zhejiang University"}]},{"given":"Tiejun","family":"Huang","sequence":"additional","affiliation":[{"name":"Beijing Academy of Artificial Intelligence"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00303"},{"key":"ref2","article-title":"Exploring visual prompts for adapting large-scale models","author":"Bahng","year":"2022"},{"key":"ref3","first-page":"1","article-title":"Visual prompting via image inpainting","author":"Bar","year":"2022","journal-title":"Adv. Neural Inform. Process. Syst"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00925"},{"key":"ref5","article-title":"Language models are few-shot learners","author":"Brown","year":"2020"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1155\/2013\/154860"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref8","first-page":"1","article-title":"Pix2seq: A language modeling framework for object detection","volume-title":"Int. Conf. Learn. Representations","author":"Chen"},{"key":"ref9","article-title":"A unified sequence interface for vision tasks","author":"Chen","year":"2022","journal-title":"Adv. Neural Inform. Process. Syst"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19815-1_37"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.350"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.66"},{"key":"ref14","doi-asserted-by":"crossref","DOI":"10.1109\/ICCV51070.2023.01850","article-title":"Mose: A new dataset for video object segmentation in complex scenes","author":"Ding","year":"2023"},{"key":"ref15","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"Int. Conf. Learn. Representations","author":"Dosovitskiy"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-009-0275-4"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19800-7_41"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.2012.2205687"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.169"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19818-2_7"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/42.845178"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00324"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00916"},{"key":"ref25","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00656"},{"key":"ref27","article-title":"UViM: A unified modeling approach for vision with learned guiding codes","author":"Kolesnikov","year":"2022","journal-title":"Adv. Neural Inform. Process. Syst"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00139"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2820063"},{"key":"ref31","first-page":"3430","article-title":"Video object segmentation with adaptive feature bank and uncertain-region refinement","volume":"33","author":"Liang","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.549"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00405"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00142"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"ref37","first-page":"1","article-title":"Unified-IO: A unified model for vision, language, and multi-modal tasks","author":"Lu","year":"2022"},{"key":"ref38","article-title":"Iteratively trained interactive segmentation","volume-title":"British Machine Vision Conference (BMVC)","author":"Mahadevan"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00686"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00932"},{"key":"ref41","article-title":"The 2017 davis challenge on video object segmentation","author":"Pont-Tuset","year":"2017"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00690"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01196"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2004.825627"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.1999.784637"},{"key":"ref46","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017","journal-title":"Adv. Neural Inform. Process. Syst"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58601-0_43"},{"key":"ref48","article-title":"Loveda: A remote sensing land-cover dataset for domain adaptive semantic segmentation","author":"Wang","year":"2021"},{"key":"ref49","article-title":"OFA: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework","volume-title":"Int. Conf. Mach. Learn","author":"Wang"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00660"},{"key":"ref51","first-page":"17721","article-title":"SOlOv2: Dynamic and fast instance segmentation","volume":"33","author":"Wang","year":"2020","journal-title":"Adv. Neural Inform. Process. Syst"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3111116"},{"key":"ref53","first-page":"28","article-title":"isaid: A large-scale dataset for instance segmentation in aerial images","volume-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops","author":"Zamir"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20044-1_8"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.47"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_36"},{"key":"ref57","article-title":"Feature-proxy transformer for few-shot segmentation","author":"Zhang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.660"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-018-1140-0"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01630"}],"event":{"name":"2023 IEEE\/CVF International Conference on Computer Vision (ICCV)","location":"Paris, France","start":{"date-parts":[[2023,10,1]]},"end":{"date-parts":[[2023,10,6]]}},"container-title":["2023 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10376473\/10376477\/10376572.pdf?arnumber=10376572","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,18]],"date-time":"2024-01-18T00:51:09Z","timestamp":1705539069000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10376572\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,1]]},"references-count":60,"URL":"https:\/\/doi.org\/10.1109\/iccv51070.2023.00110","relation":{},"subject":[],"published":{"date-parts":[[2023,10,1]]}}}