{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,23]],"date-time":"2026-04-23T11:04:02Z","timestamp":1776942242161,"version":"3.51.4"},"reference-count":45,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100021856","name":"PRIN 2017 PREVUE: \u201cPRediction of activities and Events by Vision in an Urban Environment,\u201d through the Italian Ministry of Education, University and Research","doi-asserted-by":"publisher","award":["2017N2RK7K"],"award-info":[{"award-number":["2017N2RK7K"]}],"id":[{"id":"10.13039\/501100021856","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2022,10]]},"DOI":"10.1109\/tits.2022.3160673","type":"journal-article","created":{"date-parts":[[2022,5,26]],"date-time":"2022-05-26T19:36:00Z","timestamp":1653593760000},"page":"19817-19826","source":"Crossref","is-referenced-by-count":55,"title":["An End-to-End Curriculum Learning Approach for Autonomous Driving Scenarios"],"prefix":"10.1109","volume":"23","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0399-8836","authenticated-orcid":false,"given":"Luca","family":"Anzalone","sequence":"first","affiliation":[{"name":"Department of Physics and Astronomy (DIFA), University of Bologna, Bologna, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7692-0626","authenticated-orcid":false,"given":"Paola","family":"Barra","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Sapienza University of Rome, Rome, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4042-3000","authenticated-orcid":false,"given":"Silvio","family":"Barra","sequence":"additional","affiliation":[{"name":"Department of Electrical and Information Technology Engineering (DIETI), University of Naples &#x201C;Federico II&#x201D;, Naples, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0571-1074","authenticated-orcid":false,"given":"Aniello","family":"Castiglione","sequence":"additional","affiliation":[{"name":"Department of Science and Technology (DIST), University of Naples &#x201C;Parthenope&#x201D;, Naples, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2517-2867","authenticated-orcid":false,"given":"Michele","family":"Nappi","sequence":"additional","affiliation":[{"name":"Department of Computer Science, University of Salerno, Salerno, Italy"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_15"},{"key":"ref38","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"arXiv 1312 5602"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"ref32","first-page":"834","article-title":"Improving stochastic policy gradients in continuous control with deep reinforcement learning using the beta distribution","author":"chou","year":"2017","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref31","volume":"1","author":"goodfellow","year":"2016","journal-title":"Deep Learning"},{"key":"ref30","article-title":"High-dimensional continuous control using generalized advantage 
estimation","author":"schulman","year":"2015","journal-title":"arXiv 1506 02438 [cs]"},{"key":"ref37","article-title":"OpenAI gym","author":"brockman","year":"2016","journal-title":"arXiv 1606 01540 [cs]"},{"key":"ref36","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref35","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv 1412 6980"},{"key":"ref34","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-67361-5_40"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref11","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"arXiv 1707 06347"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"key":"ref13","article-title":"End to end learning for self-driving cars","author":"bojarski","year":"2016","journal-title":"arXiv 1604 07316 [cs]"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_27"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref17","article-title":"Conditional affordance learning for driving in urban environments","author":"sauer","year":"2018","journal-title":"arXiv 1806 06498"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00886"},{"key":"ref19","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref28","author":"wymann","year":"2000","journal-title":"TORCS The Open Racing Car Simulator"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.2983149"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569938"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1002\/rob.21918"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00942"},{"key":"ref29","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460487"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793742"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_36"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.3390\/machines5010006"},{"key":"ref9","article-title":"CARLA: An open urban driving simulator","author":"dosovitskiy","year":"2017","journal-title":"arXiv 1711 03938"},{"key":"ref1","year":"2014","journal-title":"International On-Road Automated Vehicle Standards Committee"},{"key":"ref20","article-title":"Prioritized experience replay","author":"schaul","year":"2015","journal-title":"arXiv 1511 05952"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP42928.2021.9506673"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2019.8917306"},{"key":"ref21","article-title":"Auto-encoding variational Bayes","author":"kingma","year":"2013","journal-title":"arXiv 1312 6114"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2017.12.012"},{"key":"ref24","article-title":"Addressing function approximation error in 
actor-critic methods","author":"fujimoto","year":"2018","journal-title":"arXiv 1802 09477"},{"key":"ref41","first-page":"116","article-title":"ShuffleNet V2: Practical guidelines for efficient CNN architecture design","author":"ma","year":"2018","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref23","article-title":"Deep reinforcement learning with double Q-learning","author":"van hasselt","year":"2015","journal-title":"arXiv 1509 06461 [cs]"},{"key":"ref44","first-page":"265","article-title":"Tensorflow: A system for large-scale machine learning","author":"abadi","year":"2016","journal-title":"Proc of USENIX Symp on Operating Systems Design and Implementation (OSDI)"},{"key":"ref26","article-title":"Urban driving with multi-objective deep reinforcement learning","author":"li","year":"2018","journal-title":"arXiv 1811 08586"},{"key":"ref43","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"arXiv 1502 03167"},{"key":"ref25","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"arXiv 1801 01290"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6979\/9916643\/09782734.pdf?arnumber=9782734","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,5]],"date-time":"2022-12-05T22:42:25Z","timestamp":1670280145000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9782734\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10]]},"references-count":45,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tits.2022.3160673","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"value":"1524-9050","type":"print"},{"value":"1558-0016","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10]]}}}