@COMMENT This file was generated by bib2html.pl version 0.90
@COMMENT written by Patrick Riley
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers

@article{RAL20-pavse,
  author   = {Pavse, Brahma and Torabi, Faraz and Hanna, Josiah and Warnell, Garrett and Stone, Peter},
  title    = {{RIDM}: Reinforced Inverse Dynamics Modeling for Learning from a Single Observed Demonstration},
  journal  = {{IEEE} Robotics and Automation Letters (RA-L)},
  year     = {2020},
  month    = oct,
  volume   = {5},
  number   = {4},
  pages    = {6262--6269},
  issn     = {2377-3766},
  doi      = {10.1109/LRA.2020.3010750},
  wwwnote  = {Presented at International Conference on Intelligent Robots and Systems ({IROS})\\ A preliminary version was presented at the Imitation, Intent, and Interaction (I3) Workshop at ICML 2019.\\ Video of the experiments; 13-minute video presentation.},
  abstract = { Augmenting reinforcement learning with imitation learning is often hailed as a method by which to improve upon learning from scratch. However, most existing methods for integrating these two techniques are subject to several strong assumptions---chief among them that information about demonstrator actions is available. In this paper, we investigate the extent to which this assumption is necessary by introducing and evaluating reinforced inverse dynamics modeling (RIDM), a novel paradigm for combining imitation from observation (IfO) and reinforcement learning with no dependence on demonstrator action information. Moreover, RIDM requires only a single demonstration trajectory and is able to operate directly on raw (unaugmented) state features. We find experimentally that RIDM performs favorably compared to a baseline approach for several tasks in simulation as well as for tasks on a real UR5 robot arm. Experiment videos can be found at https://sites.google.com/view/ridm-reinforced-inverse-dynami. },
}