% pubman genre = conference-paper
@inproceedings{item_2519718,
  title = {{Towards a platform-independent cooperative human-robot interaction system: II. Perception, execution and imitation of goal directed actions}},
  author = {Lall{\'e}e, S. and Pattacini, U. and Boucher, J. D. and Lemaignan, S. and Lenz, A. and Melhuish, C. and Natale, L. and Skachek, S. and Hamann, K. and Steinwender, J. and Sisbot, E. A. and Metta, G. and Alami, R. and Warnier, M. and Guitton, J. and Warneken, F. and Dominey, P. F.},
  language = {eng},
  isbn = {978-1-61284-456-5},
  doi = {10.1109/IROS.2011.6094744},
  year = {2011},
  date = {2011},
  abstract = {{If robots are to cooperate with humans in an increasingly human-like manner, then significant progress must be made in their abilities to observe and learn to perform novel goal-directed actions in a flexible and adaptive manner. The current research addresses this challenge. In CHRIS.I [1], we developed a platform-independent perceptual system that learns from observation to recognize human actions in a way which abstracted from the specifics of the robotic platform, learning actions including ``put X on Y'' and ``take X''. In the current research, we extend this system from action perception to execution, consistent with current developmental research in human understanding of goal-directed action and teleological reasoning. We demonstrate the platform independence with experiments on three different robots. In Experiments 1 and 2 we complete our previous study of perception of the actions ``put'' and ``take'', demonstrating how the system learns to execute these same actions, along with the new related actions ``cover'' and ``uncover'', based on the composition of the action primitives ``grasp X'' and ``release X at Y''. Significantly, these compositional action execution specifications learned on one iCub robot are then executed on another, based on the abstraction layer of motor primitives. Experiment 3 further validates the platform independence of the system, as a new action that is learned on the iCub in Lyon is then executed on the Jido robot in Toulouse. In Experiment 4 we extended the definition of action perception to include the notion of agency, again inspired by developmental studies of agency attribution, exploiting the Kinect motion capture system for tracking human motion. Finally, in Experiment 5 we demonstrate how the combined representation of action in terms of perception and execution provides the basis for imitation. This provides the basis for an open-ended cooperation capability where new actions can be learned and integrated into shared plans for cooperation. Part of the novelty of this research is the robots{\textquotesingle} use of spoken language understanding and visual perception to generate action representations in a platform-independent manner based on physical state changes. This provides a flexible capability for goal-directed action imitation.}},
  booktitle = {{2011 IEEE/RSJ International Conference on Intelligent Robots and Systems}},
  pages = {2895--2902},
  address = {San Francisco, CA},
  note = {2011 IEEE/RSJ International Conference on Intelligent Robots and Systems},
}