{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T15:37:30Z","timestamp":1773329850927,"version":"3.50.1"},"reference-count":36,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"NSF","award":["DGE 1752814"],"award-info":[{"award-number":["DGE 1752814"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1109\/lra.2024.3368231","type":"journal-article","created":{"date-parts":[[2024,2,21]],"date-time":"2024-02-21T18:58:42Z","timestamp":1708541922000},"page":"3625-3632","source":"Crossref","is-referenced-by-count":9,"title":["Skill-Critic: Refining Learned Skills for Hierarchical Reinforcement Learning"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-7653-9713","authenticated-orcid":false,"given":"Ce","family":"Hao","sequence":"first","affiliation":[{"name":"Department of Mechanical Engineering, University of California, Berkeley, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8521-9305","authenticated-orcid":false,"given":"Catherine","family":"Weaver","sequence":"additional","affiliation":[{"name":"Department of Mechanical Engineering, University of California, Berkeley, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7536-9983","authenticated-orcid":false,"given":"Chen","family":"Tang","sequence":"additional","affiliation":[{"name":"Department of Mechanical Engineering, University of California, Berkeley, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7276-2766","authenticated-orcid":false,"given":"Kenta","family":"Kawamoto","sequence":"additional","affiliation":[{"name":"Sony Research Inc., Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0206-6639","authenticated-orcid":false,"given":"Masayoshi","family":"Tomizuka","sequence":"additional","affiliation":[{"name":"Department of Mechanical Engineering, University of California, Berkeley, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1474-1200","authenticated-orcid":false,"given":"Wei","family":"Zhan","sequence":"additional","affiliation":[{"name":"Department of Mechanical Engineering, University of California, Berkeley, CA, USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-021-04357-7"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3190100"},{"key":"ref4","first-page":"188","article-title":"Accelerating reinforcement 
"volume-title":"Proc. Conf. Robot Learn.","author":"Pertsch","year":"2021"},{"key":"ref5","first-page":"729","article-title":"Guided reinforcement learning with learned skills","volume-title":"Proc. Conf. Robot Learn.","author":"Pertsch","year":"2022"},{"key":"ref6","article-title":"Decision making for human-in-the-loop robotic agents via uncertainty-aware reinforcement learning","author":"Singi","year":"2023"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2008.10.024"},{"key":"ref8","article-title":"AWAC: Accelerating online reinforcement learning with offline datasets","author":"Nair","year":"2020"},{"key":"ref9","article-title":"Cal-QL: Calibrated offline RL pre-training for efficient online fine-tuning","author":"Nakamoto","year":"2023"},{"key":"ref10","article-title":"Auto-encoding variational Bayes","author":"Kingma","year":"2013"},{"key":"ref11","article-title":"Dynamics-aware unsupervised discovery of skills","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Sharma","year":"2020"},{"key":"ref12","first-page":"2010","article-title":"DAC: The double actor-critic architecture for learning options","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Zhang","year":"2019"},{"key":"ref13","article-title":"Diversity is all you need: Learning skills without a reward function","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Eysenbach","year":"2019"},{"key":"ref14","article-title":"Learning an embedding space for transferable robot skills","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hausman","year":"2018"},{"key":"ref15","first-page":"1652","article-title":"Don't start from scratch: Leveraging prior data to automate robotic reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Walke","year":"2023"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561232"},{"key":"ref17","first-page":"38600","article-title":"ASPiRe: Adaptive skill priors for reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Xu","year":"2022"},{"key":"ref18","article-title":"Skill-based meta-reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Nam","year":"2022"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3453160"},{"key":"ref20","first-page":"302","article-title":"Bootstrap your own skills: Learning to solve new tasks with large language model guidance","volume-title":"Proc. Conf. Robot Learn.","author":"Zhang","year":"2023"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10916"},{"key":"ref22","article-title":"SOAC: The soft option actor-critic architecture","author":"Li","year":"2020"},{"key":"ref23","first-page":"5048","article-title":"Hindsight experience replay","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Andrychowicz","year":"2017"},{"key":"ref24","first-page":"35603","article-title":"Contrastive learning as goal-conditioned reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Eysenbach","year":"2022"},{"key":"ref25","first-page":"14843","article-title":"Planning with goal-conditioned policies","volume-title":"Proc. Adv. Neural Inf. Process. Syst.",
Syst.","author":"Nasiriany","year":"2019"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3087733"},{"key":"ref27","first-page":"762","article-title":"Transferring hierarchical structures with dual meta imitation learning","volume-title":"Proc. Conf. Robot Learn.","author":"Gao","year":"2023"},{"key":"ref28","first-page":"2095","article-title":"Residual skill policies: Learning an adaptable skill-based action space for reinforcement learning for robotics","volume-title":"Proc. Conf. Robot Learn.","author":"Rana","year":"2023"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530067"},{"key":"ref30","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja","year":"2018"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref32","article-title":"D4rl: Datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3064284"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref35","article-title":"Learning multi-level hierarchies with hindsight","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Levy","year":"2019"},{"key":"ref36","article-title":"Parrot: Data-driven behavioral priors for reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Singh","year":"2020"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/7083369\/10440130\/10443024-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/10440130\/10443024.pdf?arnumber=10443024","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,8]],"date-time":"2024-03-08T02:10:09Z","timestamp":1709863809000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10443024\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4]]},"references-count":36,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/lra.2024.3368231","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4]]}}}