@incollection{0c8ee5a2e4da45809fad579ecf0eeab7,
title = "Methods for learning control policies from variable-constraint demonstrations",
abstract = "Many everyday human skills can be framed in terms of performing some task subject to constraints imposed by the task or the environment. Constraints are usually not observable and frequently change between contexts. In this chapter, we explore the problem of learning control policies from data containing variable, dynamic and non-linear constraints on motion. We discuss how an effective approach for doing this is to learn the unconstrained policy in a way that is consistent with the constraints. We then go on to discuss several recent algorithms for extracting policies from movement data, where observations are recorded under variable, unknown constraints. We review a number of experiments testing the performance of these algorithms and demonstrating how the resultant policy models generalise over constraints, allowing prediction of behaviour in unseen settings where new constraints apply.",
author = "M. Howard and S. Klanke and M. Gienger and C. Goerick and S. Vijayakumar",
year = "2010",
doi = "10.1007/978-3-642-05181-4_12",
language = "English",
isbn = "978-3-642-05180-7",
series = "Studies in Computational Intelligence",
publisher = "Springer",
pages = "253--291",
booktitle = "From Motor Learning to Interaction Learning in Robots",
address = "Berlin, Heidelberg",
}