Motion planning is the problem of determining how to move a robot from one point to another. Ideally, robots should let past experiences, their own and others', inform future actions so that they operate more robustly and improve their performance over time. Motion planning, as it is largely practiced today, solves one problem at a time and makes limited use of past history. The goal of this project is to transform the way robots plan their motions by learning to exploit similarities between different experiences and by creating strategies that adapt to a wide range of scenarios. The work will create a bridge between the motion planning community and the information retrieval community, potentially transforming both fields. Training opportunities for diverse students will be offered. All developed software will be disseminated under an open-source license, and the accompanying infrastructure will enable other researchers to use the experience databases and contribute to them.

This project takes a two-pronged approach to transforming motion planning with an experience database. First, hashing will be applied to an environment to fetch, from a database, roadmaps built for similar environments. A roadmap is a graph representing feasible motions for a robot. The fetched roadmaps will then be lazily composed and refined so that the robot can plan efficiently in the current environment. The use of prior experience will proceed in tandem with planning from scratch; the latter, when successful, provides a path and adds to the experience database. The second prong is to maintain performance characteristics for a library of motion planning algorithms. These characteristics will then be used to optimize algorithm performance and to construct a portfolio of algorithms that is competitive across a variety of problems. The overall framework will be implemented in the cloud.
This work has been supported by NSF grant RI 1718478.
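To make the "fetch" step of the first prong concrete, here is a minimal Python sketch (using numpy) that hashes a discretized environment with random-hyperplane locality-sensitive hashing and retrieves roadmaps stored for environments that hashed alike. The grid encoding, hash length, and storage scheme are illustrative assumptions, not the project's actual implementation.

import numpy as np

rng = np.random.default_rng(0)
N_BITS, GRID = 16, 64                     # hash length; environment as a flattened 8x8 occupancy grid
planes = rng.normal(size=(N_BITS, GRID))  # random hyperplanes, fixed for the lifetime of the database

def lsh_key(occupancy):
    """Map a flattened 0/1 occupancy grid to an N_BITS-bit bucket key."""
    bits = planes.dot(np.asarray(occupancy) - 0.5) > 0
    return sum(int(b) << i for i, b in enumerate(bits))

database = {}  # bucket key -> roadmaps solved in similar environments

def store(occupancy, roadmap):
    database.setdefault(lsh_key(occupancy), []).append(roadmap)

def fetch(occupancy):
    """Candidate roadmaps for lazy composition and refinement."""
    return database.get(lsh_key(occupancy), [])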
@inproceedings{shome2023privacy,
author = {Shome, Rahul and Kingston, Zachary and Kavraki, Lydia E.},
title = {Robots as {AI} Double Agents: Privacy in Motion Planning},
booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year = {2023},
pages = {2861--2868},
abstract = {Robotics and automation are poised to change the landscape of home and work in the
near future. Robots are adept at deliberately moving, sensing, and interacting with their
environments. The pervasive use of this technology promises societal and economic payoffs
due to its capabilities - conversely, the capabilities of robots to move within and sense
the world around them are susceptible to abuse. Robots, unlike typical sensors, are
inherently autonomous, active, and deliberate. Such automated agents can become AI double
agents liable to violate the privacy of coworkers, privileged spaces, and other
stakeholders. In this work we highlight the understudied and inevitable threats to privacy
that can be posed by the autonomous, deliberate motions and sensing of robots. We frame the
problem within broader sociotechnological questions alongside a comprehensive review. The
privacy-aware motion planning problem is formulated in terms of cost functions that can be
modified to induce privacy-aware behavior - preserving, agnostic, or violating. Simulated
case studies in manipulation and navigation, with altered cost functions, are used to
demonstrate how privacy-violating threats can be easily injected, sometimes with only small
changes in performance (solution path lengths). Such functionality is already widely
available. This preliminary work is meant to lay the foundations for near-future, holistic,
interdisciplinary investigations that can address questions surrounding privacy in
intelligent robotic behaviors determined by planning algorithms.},
url = {https://ieeexplore.ieee.org/document/10341460},
doi = {10.1109/IROS55552.2023.10341460}
}
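The paper's framing of privacy-aware behavior as a modified cost function can be sketched in a few lines of Python. The visibility test and the weighting below are stand-ins for illustration, not the paper's actual model.

import math

def sees_private_region(state, region):
    # Illustrative proxy: the robot "observes" the region when within sensing range.
    return math.dist(state, region) < 1.0

def path_cost(path, region, w):
    """Path length plus a privacy term: w > 0 preserving, w = 0 agnostic, w < 0 violating."""
    length = sum(math.dist(a, b) for a, b in zip(path, path[1:]))
    exposure = sum(sees_private_region(s, region) for s in path)
    return length + w * exposure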
@inproceedings{kingston2022-robowflex,
abstract = {Robowflex is a software library for robot motion planning in industrial and research
applications, leveraging the popular MoveIt library and Robot Operating System (ROS)
middleware. Robowflex takes advantage of the ease of motion planning with MoveIt while
providing an augmented API to craft and manipulate motion planning queries within a single
program. Robowflex's high-level API simplifies many common use-cases while still providing
access to the underlying MoveIt library. Robowflex is particularly useful for 1) developing
new motion planners, 2) evaluating motion planners, and 3) solving complex problems that use
motion planning (e.g., task and motion planning). Robowflex also provides visualization
capabilities, integrations to other robotics libraries (e.g., DART and Tesseract), and is
complementary to many other robotics packages. With our library, the user does not need to
be an expert at ROS or MoveIt in order to set up motion planning queries, extract
information from results, and directly interface with a variety of software components. We
provide a few example use-cases that demonstrate its efficacy.},
author = {Kingston, Zachary and Kavraki, Lydia E.},
booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems},
title = {Robowflex: Robot Motion Planning with MoveIt Made Easy},
year = {2022},
month = oct,
pages = {3108--3114},
doi = {10.1109/IROS47612.2022.9981698}
}
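The single-program workflow the abstract describes (craft a query, plan, and extract results without standing up a full ROS pipeline) might look roughly like the sketch below. Every name here is invented for illustration and is not Robowflex's real C++ API.

class PlanningQuery:                 # invented container, not Robowflex's API
    def __init__(self, robot, scene, start, goal):
        self.robot, self.scene = robot, scene
        self.start, self.goal = start, goal

def plan(query):
    """Stand-in for 'hand the query to MoveIt and return a path'."""
    return [query.start, query.goal]        # trivially connects start to goal

query = PlanningQuery("fetch", {"table": (1.0, 0.0, 0.4)}, (0.0, 0.0), (1.0, 1.0))
path = plan(query)                          # inspect results in the same program
print(len(path), "waypoints")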
@inproceedings{quintero-chamzas2022-blind,
title = {Human-Guided Motion Planning in Partially Observable Environments},
author = {Quintero-Pe{\~n}a, Carlos and Chamzas, Constantinos and Sun, Zhanyi and Unhelkar, Vaibhav and Kavraki, Lydia E.},
booktitle = {2022 International Conference on Robotics and Automation (ICRA)},
month = may,
year = {2022},
pages = {7226--7232},
doi = {10.1109/ICRA46639.2022.9811893},
abstract = {Motion planning is a core problem in robotics, with a range of existing methods
aimed to address its diverse set of challenges. However, most existing methods rely on
complete knowledge of the robot environment; an assumption that seldom holds true due to
inherent limitations of robot perception. To enable tractable motion planning for high-DOF
robots under partial observability, we introduce BLIND, an algorithm that leverages human
guidance. BLIND utilizes inverse reinforcement learning to derive motion-level guidance from
human critiques. The algorithm overcomes the computational challenge of reward learning for
high-DOF robots by projecting the robot’s continuous configuration space to a
motion-planner-guided discrete task model. The learned reward is in turn used as guidance to
generate robot motion using a novel motion planner. We demonstrate BLIND using the Fetch
robot and perform two simulation experiments with partial observability. Our experiments
demonstrate that, despite the challenge of partial observability and high dimensionality,
BLIND is capable of generating safe robot motion and outperforms baselines on metrics of
teaching efficiency, success rate, and path quality.},
keyword = {uncertainty},
publisher = {IEEE}
}
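Under strong simplifications, the critique-to-reward step can be sketched as follows: human critiques label transitions of the discrete task model as acceptable or not, and a linear reward over transition features is fit by logistic regression. The features and synthetic labels below are assumptions for illustration, not BLIND's actual formulation.

import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(40, 3))                 # features of critiqued transitions
y = (X[:, 0] - X[:, 1] > 0).astype(float)    # 1 = approved by the human, 0 = rejected

w = np.zeros(3)
for _ in range(500):                         # plain gradient ascent on the log-likelihood
    p = 1.0 / (1.0 + np.exp(-X.dot(w)))
    w += 0.1 * X.T.dot(y - p) / len(y)

reward = lambda feats: feats.dot(w)          # learned reward, used to guide the planner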
@inproceedings{chamzas2022-learn-retrieve,
title = {Learning to Retrieve Relevant Experiences for Motion Planning},
author = {Chamzas, Constantinos and Cullen, Aedan and Shrivastava, Anshumali and Kavraki, Lydia E.},
booktitle = {2022 International Conference on Robotics and Automation (ICRA)},
month = may,
year = {2022},
pages = {7233--7240},
doi = {10.1109/ICRA46639.2022.9812076},
abstract = {Recent work has demonstrated that motion planners’ performance can be
significantly improved by retrieving past experiences from a database. Typically, the
experience database is queried for past similar problems using a similarity function defined
over the motion planning problems. However, to date, most works rely on simple hand-crafted
similarity functions and fail to generalize outside their corresponding training dataset. To
address this limitation, we propose FIRE, a framework that extracts local representations
of planning problems and learns a similarity function over them. To generate the training
data we introduce a novel self-supervised method that identifies similar and dissimilar
pairs of local primitives from past solution paths. With these pairs, a Siamese network is
trained with the contrastive loss and the similarity function is realized in the network’s
latent space. We evaluate FIRE on an 8-DOF manipulator in five categories of motion planning
problems with sensed environments. Our experiments show that FIRE retrieves relevant
experiences which can informatively guide sampling-based planners even in problems outside
its training distribution, outperforming other baselines.},
keyword = {fundamentals of sampling-based motion planning},
publisher = {IEEE}
}
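The learned similarity reduces, in miniature, to a Siamese embedding trained with a contrastive loss: similar pairs of local primitives are pulled together, dissimilar pairs pushed at least a margin apart. A single linear layer stands in for FIRE's network in this sketch.

import numpy as np

def contrastive_loss(za, zb, same, margin=1.0):
    d = np.linalg.norm(za - zb)
    return d**2 if same else max(0.0, margin - d)**2

W = np.random.default_rng(2).normal(size=(2, 4))  # shared (Siamese) linear embedding
embed = lambda x: W.dot(x)                        # the same weights process both inputs

a, b = np.ones(4), np.zeros(4)
print(contrastive_loss(embed(a), embed(b), same=False))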
@article{chamzas2022-motion-bench-maker,
title = {MotionBenchMaker: A Tool to Generate and Benchmark Motion Planning Datasets},
author = {Chamzas, Constantinos and Quintero-Pe{\~n}a, Carlos and Kingston, Zachary and Orthey, Andreas and Rakita, Daniel and Gleicher, Michael and Toussaint, Marc and Kavraki, Lydia E.},
journal = {IEEE Robotics and Automation Letters},
month = apr,
year = {2022},
volume = {7},
number = {2},
pages = {882--889},
doi = {10.1109/LRA.2021.3133603},
abstract = {Recently, there has been a wealth of development in motion planning for robotic
manipulation: new motion planners are continuously proposed, each with its own unique set of
strengths and weaknesses. However, evaluating these new planners is challenging, and
researchers often create their own ad-hoc problems for benchmarking, which is
time-consuming, prone to bias, and does not directly compare against other state-of-the-art
planners. We present MotionBenchMaker, an open-source tool to generate benchmarking datasets
for realistic robot manipulation problems. MotionBenchMaker is designed to be an extensible,
easy-to-use tool that allows users to both generate datasets and benchmark them by comparing
motion planning algorithms. Empirically, we show the benefit of using MotionBenchMaker as a
tool to procedurally generate datasets which helps in the fair evaluation of planners. We
also present a suite of over 40 prefabricated datasets, with 5 different commonly used
robots in 8 environments, to serve as a common ground for future motion planning research.},
issn = {2377-3766},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
url = {https://dx.doi.org/10.1109/LRA.2021.3133603}
}
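The benchmark half of the tool amounts to a loop like the one below. The planner interface and problem format are placeholders, not MotionBenchMaker's actual API.

import statistics, time

def benchmark(planners, problems, trials=10):
    """planners: name -> callable(problem); returns per-planner (mean, stdev) solve time."""
    results = {}
    for name, plan in planners.items():
        times = []
        for problem in problems:
            for _ in range(trials):          # repeated trials expose variance
                t0 = time.perf_counter()
                plan(problem)
                times.append(time.perf_counter() - t0)
        results[name] = (statistics.mean(times), statistics.stdev(times))
    return results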
@inproceedings{sobti2021-complex-motor-actions,
title = {{A Sampling-based Motion Planning Framework for Complex Motor Actions}},
author = {Sobti, Shlok and Shome, Rahul and Chaudhuri, Swarat and Kavraki, Lydia E.},
booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent Robots and
Systems},
month = sep,
year = {2021},
pages = {6928--6934},
doi = {10.1109/IROS51168.2021.9636395},
abstract = {We present a framework for planning complex motor actions such as pouring or
scooping from arbitrary start states in cluttered real-world scenes. Traditional approaches
to such tasks use dynamic motion primitives (DMPs) learned from human demonstrations. We
enhance a recently proposed state-of-the-art DMP technique capable of obstacle avoidance by
including them within a novel hybrid framework. This complements DMPs with sampling-based
motion planning algorithms, using the latter to explore the scene and reach promising
regions from which a DMP can successfully complete the task. Experiments indicate that even
obstacle-aware DMPs suffer in task success when used in scenarios which largely differ from
the trained demonstration in terms of the start, goal, and obstacles. Our hybrid approach
significantly outperforms obstacle-aware DMPs by successfully completing tasks in cluttered
scenes for a pouring task in simulation. We further demonstrate our method on a real robot
for pouring and scooping tasks.},
keyword = {Motion and Path Planning, Manipulation Planning, Learning from Demonstration}
}
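For reference, a one-dimensional discrete-time DMP rollout: a spring-damper system attracts the state toward the goal g while a forcing term f, driven by a decaying phase, reproduces the demonstrated shape. The hybrid framework's sampling-based exploration to reach a promising start region is abstracted away, and f below is a placeholder rather than a learned term.

def dmp_rollout(x0, g, f, tau=1.0, K=25.0, D=10.0, dt=0.01, steps=200):
    """Integrate x'' = (K(g - x) - D x' + (g - x0) f(s)) / tau with phase decay."""
    x, v, s = x0, 0.0, 1.0
    path = [x]
    for _ in range(steps):
        a = (K * (g - x) - D * v + (g - x0) * f(s)) / tau
        v += a * dt
        x += v * dt
        s += (-2.0 * s / tau) * dt    # canonical system: the phase decays toward zero
        path.append(x)
    return path

path = dmp_rollout(0.0, 1.0, f=lambda s: 0.0)   # zero forcing: a pure attractor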
@inproceedings{kingston2021experience-foliations,
title = {Using Experience to Improve Constrained Planning on Foliations for Multi-Modal
Problems},
author = {Kingston, Zachary and Chamzas, Constantinos and Kavraki, Lydia E.},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems},
month = sep,
year = {2021},
pages = {6922--6927},
doi = {10.1109/IROS51168.2021.9636236},
abstract = {Many robotic manipulation problems are multi-modal—they consist of a discrete set
of mode families (e.g., whether an object is grasped or placed) each with a continuum of
parameters (e.g., where exactly an object is grasped). Core to these problems is solving
single-mode motion plans, i.e., given a mode from a mode family (e.g., a specific grasp),
find a feasible motion to transition to the next desired mode. Many planners for such
problems have been proposed, but complex manipulation plans may require prohibitively long
computation times due to the difficulty of solving these underlying single-mode problems. It
has been shown that using experience from similar planning queries can significantly improve
the efficiency of motion planning. However, even though modes from the same family are
similar, they impose different constraints on the planning problem, and thus experience
gained in one mode cannot be directly applied to another. We present a new experience-based
framework, ALEF, for such multi-modal planning problems. ALEF learns using paths from
single-mode problems from a mode family, and applies this experience to novel modes from the
same family. We evaluate ALEF on a variety of challenging problems and show a significant
improvement in the efficiency of sampling-based planners both in isolation and within a
multi-modal manipulation planner.},
keyword = {fundamentals of sampling-based motion planning}
}
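A compressed view of cross-mode reuse: waypoints from a path solved in one mode are perturbed and projected onto the constraint of a novel mode from the same family, then used to bias sampling. The projection below (pinning one coordinate) is only a stand-in for a real constraint projection and is not how ALEF represents experience.

import numpy as np

def project_to_mode(q, mode_param):
    q = q.copy()
    q[-1] = mode_param                 # e.g., constrain one DOF to the new mode
    return q

def biased_samples(experience_path, mode_param, rng, sigma=0.05):
    for q in experience_path:
        yield project_to_mode(q + rng.normal(0.0, sigma, q.shape), mode_param)

rng = np.random.default_rng(4)
path = [np.zeros(7), np.ones(7)]       # a path solved in a sibling mode
samples = list(biased_samples(path, mode_param=0.5, rng=rng))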
@inproceedings{chamzas2021-learn-sampling,
title = {{Learning Sampling Distributions Using Local 3D Workspace Decompositions for Motion
Planning in High Dimensions}},
author = {Chamzas, Constantinos and Kingston, Zachary and Quintero-Pe{\~n}a, Carlos and Shrivastava, Anshumali and Kavraki, Lydia E.},
booktitle = {Proceedings of the {IEEE} International Conference on Robotics and Automation},
month = jun,
year = {2021},
pages = {1283--1289},
doi = {10.1109/ICRA48506.2021.9561104},
abstract = {Earlier work has shown that reusing experience from prior motion planning problems
can improve the efficiency of similar, future motion planning queries. However, for robots
with many degrees-of-freedom, these methods exhibit poor generalization across different
environments and often require large datasets that are impractical to gather. We present
SPARK and FLAME, two experience-based frameworks for sampling-based planning applicable to
complex manipulators in 3D environments. Both combine samplers associated with features from
a workspace decomposition into a global biased sampling distribution. SPARK decomposes the
environment based on exact geometry while FLAME is more general, and uses an octree-based
decomposition obtained from sensor data. We demonstrate the effectiveness of SPARK and FLAME
on a real and simulated Fetch robot tasked with challenging pick-and-place manipulation
problems. Our approaches can be trained incrementally and significantly improve performance
with only a handful of examples, generalizing better over diverse tasks and environments as
compared to prior approaches.},
keyword = {fundamentals of sampling-based motion planning},
note = {(Top-4 finalist for best paper in Cognitive Robotics)},
url = {https://dx.doi.org/10.1109/ICRA48506.2021.9561104}
}
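The combination step reads naturally as a mixture model: each matched workspace primitive contributes a local sampler, and the global sampler picks one by weight and draws from it. Primitive matching and the weights themselves are assumed given in this sketch.

import numpy as np

def global_sample(local_samplers, weights, rng):
    """local_samplers: list of (mean, std) per matched primitive; weights sum to 1."""
    i = rng.choice(len(local_samplers), p=weights)
    mean, std = local_samplers[i]
    return rng.normal(mean, std)       # one biased configuration-space sample

rng = np.random.default_rng(3)
local_samplers = [(np.zeros(7), 0.1), (np.ones(7), 0.2)]  # two local samplers for a 7-DOF arm
q = global_sample(local_samplers, weights=[0.7, 0.3], rng=rng)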
@article{pairet2021-path-planning-for-manipulation,
title = {Path Planning for Manipulation Using Experience-Driven Random Trees},
author = {Pairet, Eric and Chamzas, Constantinos and Petillot, Yvan R. and Kavraki, Lydia E.},
journal = {IEEE Robotics and Automation Letters},
month = apr,
year = {2021},
volume = {6},
number = {2},
pages = {3295--3302},
doi = {10.1109/lra.2021.3063063},
abstract = {Robotic systems may frequently come across similar manipulation planning problems
that result in similar motion plans. Instead of planning each problem from scratch, it is
preferable to leverage previously computed motion plans, i.e., experiences, to ease the
planning. Different approaches have been proposed to exploit prior information on novel task
instances. These methods, however, rely on a vast repertoire of experiences and fail when
none relates closely to the current problem. Thus, an open challenge is the ability to
generalise prior experiences to task instances that do not necessarily resemble the prior.
This work tackles the above challenge with the proposition that experiences are
"decomposable" and "malleable", i.e., parts of an experience are suitable to relevantly
explore the connectivity of the robot-task space even in non-experienced regions. Two new
planners result from this insight: experience-driven random trees (ERT) and its
bi-directional version ERTConnect. These planners adopt a tree sampling-based strategy that
incrementally extracts and modulates parts of a single path experience to compose a valid
motion plan. We demonstrate our method on task instances that significantly differ from the
prior experiences, and compare with related state-of-the-art experience-based planners.
While their repairing strategies fail to generalise priors of tens of experiences, our
planner, with a single experience, significantly outperforms them in both success rate and
planning time. Our planners are implemented and freely available in the Open Motion Planning
Library.},
issn = {2377-3774},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
url = {http://dx.doi.org/10.1109/LRA.2021.3063063}
}
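"Decomposable and malleable" in miniature: take a random sub-segment of the single experience path and blend in an offset so its endpoints match the two states the tree wants to connect. This is a simplification for illustration, not the full ERT/ERTConnect machinery.

import numpy as np

def modulated_segment(experience, q_from, q_to, rng):
    i, j = sorted(rng.choice(len(experience), size=2, replace=False))
    seg = experience[i:j + 1]                       # extract part of the experience
    t = np.linspace(0.0, 1.0, len(seg))[:, None]
    offset = (1 - t) * (q_from - seg[0]) + t * (q_to - seg[-1])
    return seg + offset                             # endpoints now equal q_from, q_to

rng = np.random.default_rng(5)
exp = np.linspace([0.0, 0.0], [1.0, 1.0], 10)       # one stored experience path
seg = modulated_segment(exp, np.array([0.2, 0.0]), np.array([0.9, 1.0]), rng)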
@inproceedings{moll2021hyperplan,
title = {{HyperPlan}: A Framework for Motion Planning Algorithm Selection and Parameter
Optimization},
author = {Moll, Mark and Chamzas, Constantinos and Kingston, Zachary and Kavraki, Lydia E.},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems},
year = {2021},
pages = {2511--2518},
doi = {10.1109/IROS51168.2021.9636651},
abstract = {Over the years, many motion planning algorithms have been proposed. It is often
unclear which algorithm might be best suited for a particular class of problems. The problem
is compounded by the fact that algorithm performance can be highly dependent on parameter
settings. This paper shows that hyperparameter optimization is an effective tool in both
algorithm selection and parameter tuning over a given set of motion planning problems. We
present different loss functions for optimization that capture different notions of
optimality. The approach is evaluated on a broad range of scenes using two different
manipulators, a Fetch and a Baxter. We show that optimized planning algorithm performance
significantly improves upon baseline performance and generalizes broadly in the sense that
performance improvements carry over to problems that are very different from the ones
considered during optimization.}
}
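Stripped to its skeleton, the approach samples a (planner, parameters) configuration, scores it with a loss over the problem set, and keeps the best. HyperPlan uses real hyperparameter optimizers and richer loss functions; the random search, search space, and run callable below are placeholders.

import random

def mean_loss(run, planner, params, problems):
    return sum(run(planner, params, p) for p in problems) / len(problems)

def optimize(run, space, problems, budget=100):
    """space: list of (planner, {param: (low, high)}) entries to sample from."""
    best_loss, best_config = float("inf"), None
    for _ in range(budget):
        planner, ranges = random.choice(space)
        params = {k: random.uniform(lo, hi) for k, (lo, hi) in ranges.items()}
        loss = mean_loss(run, planner, params, problems)
        if loss < best_loss:
            best_loss, best_config = loss, (planner, params)
    return best_loss, best_config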
@inproceedings{kingston2020weighting-multi-modal-leads,
title = {Informing Multi-Modal Planning with Synergistic Discrete Leads},
author = {Kingston, Zachary and Wells, Andrew M. and Moll, Mark and Kavraki, Lydia E.},
booktitle = {{IEEE} International Conference on Robotics and Automation},
year = {2020},
pages = {3199--3205},
doi = {10.1109/ICRA40945.2020.9197545},
abstract = {Robotic manipulation problems are inherently continuous, but typically have
underlying discrete structure, e.g., whether or not an object is grasped. This means many
problems are multi-modal and in particular have a continuous infinity of modes. For example,
in a pick-and-place manipulation domain, every grasp and placement of an object is a mode.
Usually manipulation problems require the robot to transition into different modes, e.g.,
going from a mode with an object placed to another mode with the object grasped. To
successfully find a manipulation plan, a planner must find a sequence of valid single-mode
motions as well as valid transitions between these modes. Many manipulation planners have
been proposed to solve tasks with multi-modal structure. However, these methods require
mode-specific planners and fail to scale to very cluttered environments or to tasks that
require long sequences of transitions. This paper presents a general layered planning
approach to multi-modal planning that uses a discrete "lead" to bias search towards useful
mode transitions. The difficulty of achieving specific mode transitions is captured online
and used to bias search towards more promising sequences of modes. We demonstrate our
planner on complex scenes and show that significant performance improvements are tied to
both our discrete "lead" and our continuous representation.},
keyword = {fundamentals of sampling-based motion planning}
}
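A minimal sketch of the discrete "lead", assuming the mode-transition graph is given: a shortest path under edge weights that grow whenever a transition proves hard, so later leads steer around transitions that keep failing.

import heapq

def lead(graph, weights, start, goal):
    """graph: mode -> iterable of neighbor modes; weights: (mode, mode) -> difficulty."""
    queue, seen = [(0.0, start, [start])], set()
    while queue:
        cost, mode, path = heapq.heappop(queue)
        if mode == goal:
            return path
        if mode in seen:
            continue
        seen.add(mode)
        for nxt in graph.get(mode, []):
            if nxt not in seen:
                heapq.heappush(queue, (cost + weights[(mode, nxt)], nxt, path + [nxt]))
    return None

def report_failure(weights, a, b, penalty=2.0):
    weights[(a, b)] *= penalty        # a failed transition discourages this edge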
@techreport{lewis2019-how-much-do-unstated-problem-constraints,
title = {How Much Do Unstated Problem Constraints Limit Deep Robotic Reinforcement Learning?},
author = {Lewis II, W. Cannon and Moll, Mark and Kavraki, Lydia E.},
month = sep,
year = {2019},
doi = {10.25611/az5z-xt37},
abstract = {Deep Reinforcement Learning is a promising paradigm for robotic control which has
been shown to be capable of learning policies for high-dimensional, continuous control of
unmodeled systems. However, Robotic Reinforcement Learning currently lacks clearly defined
benchmark tasks, which makes it difficult for researchers to reproduce and compare against
prior work. "Reacher" tasks, which are fundamental to robotic manipulation, are commonly
used as benchmarks, but the lack of a formal specification elides details that are crucial
to replication. In this paper we present a novel empirical analysis which shows that the
unstated spatial constraints in commonly used implementations of Reacher tasks make it
dramatically easier to learn a successful control policy with Deep Deterministic Policy
Gradients (DDPG), a state-of-the-art Deep RL algorithm. Our analysis suggests that less
constrained Reacher tasks are significantly more difficult to learn, and hence that existing
de facto benchmarks are not representative of the difficulty of general robotic
manipulation.},
institution = {Rice University}
}
@inproceedings{chamzas2019using-local-experiences-for-global-motion-planning,
title = {Using Local Experiences for Global Motion Planning},
author = {Chamzas, Constantinos and Shrivastava, Anshumali and Kavraki, Lydia E.},
booktitle = {Proceedings of the {IEEE} International Conference on Robotics and Automation},
month = may,
year = {2019},
pages = {8606--8612},
doi = {10.1109/ICRA.2019.8794317},
abstract = {Sampling-based planners are effective in many real-world applications such as
robotic manipulation, navigation, and even protein modeling. However, it is often
challenging to generate a collision-free path in environments where key areas are hard to
sample. In the absence of any prior information, sampling-based planners are forced to
explore uniformly or heuristically, which can lead to degraded performance. One way to
improve performance is to use prior knowledge of environments to adapt the sampling strategy
to the problem at hand. In this work, we decompose the workspace into local primitives,
memorize local experiences indexed by these primitives in the form of local samplers, and store
them in a database. We synthesize an efficient global sampler by retrieving local
experiences relevant to the given situation. Our method transfers knowledge effectively
between diverse environments that share local primitives and speeds up the performance
dramatically. Our results show, in terms of solution time, an improvement of multiple orders
of magnitude in two traditionally challenging high-dimensional problems compared to
state-of-the-art approaches.},
keyword = {fundamentals of sampling-based motion planning}
}
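The store-and-retrieve cycle in outline: each solved problem contributes configurations keyed by the local primitive they belong to, and new problems look up matching primitives to seed local samplers. The key used here (a rounded pose) is a simplification of the paper's actual scheme.

import numpy as np

db = {}                                       # primitive key -> stored configurations

def key(primitive_pose, res=0.1):
    return tuple(np.round(np.asarray(primitive_pose) / res).astype(int))

def memorize(primitive_pose, configs):
    db.setdefault(key(primitive_pose), []).extend(configs)

def retrieve(primitive_pose):
    """Configurations that seed a local sampler when the primitive reappears."""
    return db.get(key(primitive_pose), [])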
@article{hernandez2019online-motion-planning-auvs,
title = {Online Motion Planning for Unexplored Underwater Environments using Autonomous
Underwater Vehicles},
author = {Hern{\'a}ndez, Juan David and Vidal, Eduard and Moll, Mark and Palomeras, Narc{\'i}s and Carreras, Marc and Kavraki, Lydia E.},
journal = {Journal of Field Robotics},
year = {2019},
volume = {36},
pages = {370--396},
doi = {10.1002/rob.21827},
abstract = {We present an approach to endow an autonomous underwater vehicle (AUV) with the
capabilities to move through unexplored environments. To do so, we propose a computational
framework for planning feasible and safe paths. The framework allows the vehicle to
incrementally build a map of the surroundings, while simultaneously (re)planning a feasible
path to a specified goal. To accomplish this, the framework considers motion constraints to
plan feasible 3D paths, i.e., those that meet the vehicle’s motion capabilities. It also
incorporates a risk function to avoid navigating close to nearby obstacles. Furthermore, the
framework makes use of two strategies to ensure meeting online computation limitations. The
first one is to reuse the last best known solution to eliminate time-consuming pruning
routines. The second one is to opportunistically check the states’ risk of collision.
To evaluate the proposed approach, we use the Sparus II performing autonomous missions in
different real-world scenarios. These experiments consist of simulated and in-water trials
for different tasks. The conducted tasks include the exploration of challenging scenarios
such as artificial marine structures, natural marine structures, and confined natural
environments. All these applications allow us to extensively prove the efficacy of the
presented approach, not only for constant-depth missions (2D), but, more importantly, for
situations in which the vehicle must vary its depth (3D).},
issue = {2},
keyword = {other robotics}
}
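The framework's control flow reduces to a replan-while-mapping loop: sense, update the map, keep the last best path while it remains valid (avoiding time-consuming pruning), and otherwise replan with a risk-aware cost. Every callable below is a placeholder for one of the framework's components.

def online_navigation(sense, update_map, still_valid, replan, execute_step, at_goal, path):
    while not at_goal():
        update_map(sense())            # incremental map of the surroundings
        if not still_valid(path):      # reuse the last best known solution ...
            path = replan(path)        # ... else a warm-started, risk-aware replan
        path = execute_step(path)      # follow the current path one step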
@article{muhayyuddin2018randomized-physics-based-motion-planning,
title = {Randomized Physics-based Motion Planning for Grasping in Cluttered and Uncertain
Environments},
author = {Muhayyuddin and Moll, Mark and Kavraki, Lydia E. and Rosell, Jan},
journal = {IEEE Robotics and Automation Letters},
month = apr,
year = {2018},
volume = {3},
number = {2},
pages = {712--719},
doi = {10.1109/LRA.2017.2783445},
abstract = {Planning motions to grasp an object in cluttered and uncertain environments is a
challenging task, particularly when a collision-free trajectory does not exist and objects
obstructing the way are required to be carefully grasped and moved out. This paper takes a
different approach and proposes to address this problem by using a randomized physics-based
motion planner that permits robot-object and object-object interactions. The main idea is to
avoid an explicit high-level reasoning of the task by providing the motion planner with a
physics engine to evaluate possible complex multi-body dynamical interactions. The approach
is able to solve the problem in complex scenarios, also considering uncertainty in the
objects' pose and in the contact dynamics. The work enhances the state validity checker, the
control sampler and the tree exploration strategy of a kinodynamic motion planner called
KPIECE. The enhanced algorithm, called p-KPIECE, has been validated in simulation and with
real experiments. The results have been compared with an ontological physics-based motion
planner and with task and motion planning approaches, resulting in a significant improvement
in terms of planning time, success rate and quality of the solution path.},
keyword = {kinodynamic systems}
}
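The planner's key substitution, sketched: instead of a geometric collision check, a sampled control is simulated through a physics engine and the resulting multi-body state is kept only if its interactions stay within bounds. The engine interface below is hypothetical, standing in for the physics engine inside KPIECE.

def propagate(engine, state, control, duration, max_contact_force=50.0):
    """Simulate a control and validate the outcome; returns None when invalid."""
    engine.set_state(state)
    new_state, contacts = engine.simulate(control, duration)
    gentle = all(c.force <= max_contact_force for c in contacts)
    return new_state if gentle else None    # interactions allowed, but not violent ones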