In contrast to humans, who use many types of manipulation to accomplish daily tasks and can easily sequence and execute the pertinent actions, robots are confined to simple tasks that are often painstakingly broken down by the humans who operate them. Toward the goal of increasing robot autonomy, this project will extend the capabilities of manipulation planners. For the purposes of this research, manipulation planning is the domain between classical motion planning and what is often called task and motion planning, which includes temporal reasoning and higher-order logics. This research adopts a constraint-centric view and defines a set of low-dimensional subspaces, or modes, among which the system must transition. The definition of transitions is also constraint-centric and is possible only because of the unified treatment of modes. The work draws on constructs from differential geometry and on powerful motion planners. It adopts a synergistic layered scheme in which a discrete planner decides the sequence of modes while being continually informed by a continuous planner that attempts the transitions between modes. The work will start with a specific but general type of constraint, manifold constraints, and later expand to other types. The proposed research will identify the limits of using constraints as a unifying construct in manipulation planning and, in doing so, will also allow for the incorporation of manipulation-specific primitives that can extend the framework.
This work has been supported by grant NSF RI 2008720.
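The layered scheme described in the overview above can be made concrete in a few lines. Below is a minimal, illustrative Python sketch (all names hypothetical, not the project's implementation): a discrete lead over modes comes from a weighted shortest-path search, a continuous planner attempts each transition, and failed transitions are penalized so the next lead avoids them.

import heapq
import random

def plan_multimodal(mode_graph, start, goal, try_transition, max_iters=100):
    """mode_graph: dict mode -> adjacent modes; try_transition(a, b) -> bool,
    True if the continuous planner manages to connect mode a to mode b."""
    weight = {(a, b): 1.0 for a in mode_graph for b in mode_graph[a]}
    for _ in range(max_iters):
        lead = dijkstra_lead(mode_graph, weight, start, goal)
        if lead is None:
            return None
        for a, b in zip(lead, lead[1:]):
            if not try_transition(a, b):
                weight[(a, b)] *= 2.0      # penalize the hard transition
                break
        else:
            return lead                    # every transition succeeded
    return None

def dijkstra_lead(graph, weight, start, goal):
    queue, seen = [(0.0, start, [start])], set()
    while queue:
        cost, node, path = heapq.heappop(queue)
        if node == goal:
            return path
        if node in seen:
            continue
        seen.add(node)
        for nxt in graph[node]:
            if nxt not in seen:
                heapq.heappush(queue, (cost + weight[(node, nxt)], nxt, path + [nxt]))
    return None

print(plan_multimodal({"grasped": ["placed"], "placed": []}, "grasped", "placed",
                      lambda a, b: random.random() > 0.3))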
@inproceedings{liang2024-scaling,
title = {Scaling Long-Horizon Online POMDP Planning via Rapid State Space Sampling},
author = {Liang, Yuanchu and Kim, Edward and Thomason, Wil and Kingston, Zachary and Kurniawati, Hanna and Kavraki, Lydia E.},
booktitle = {Robotics Research},
note = {Appeared at the International Symposium on Robotics Research (ISRR) 2024.},
year = {2025},
abstract = {Partially Observable Markov Decision Processes (POMDPs) are a general and principled
framework for motion planning under uncertainty. Despite tremendous improvement in the
scalability of POMDP solvers, long-horizon POMDPs (e.g., steps) remain difficult to solve.
This paper proposes a new approximate online POMDP solver, called Reference-Based Online
POMDP Planning via Rapid State Space Sampling (ROP-RaS3). ROP-RaS3 uses novel extremely fast
sampling-based motion planning techniques to sample the state space and generate a diverse
set of macro actions online which are then used to bias belief-space sampling and infer
high-quality policies without requiring exhaustive enumeration of the action space -- a
fundamental constraint for modern online POMDP solvers. ROP-RaS3 is evaluated on various
long-horizon POMDPs, including on a problem with a planning horizon of more than 100 steps
and a problem with a 15-dimensional state space that requires more than 20 look ahead steps.
In all of these problems, ROP-RaS3 substantially outperforms other state-of-the-art methods
by up to multiple folds.}
}
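The macro-action idea above in toy form: rapidly sample short multi-step motions that reach diverse states, then let a particle belief pick among them, avoiding enumeration of primitive actions. The 1-D domain and selection rule are illustrative stand-ins, not ROP-RaS3 itself.

import random

def sample_macros(n=20, length=4):
    macros, seen = [], set()
    while len(macros) < n:
        m = tuple(random.choice((-1, 1)) for _ in range(length))
        end = sum(m)
        if end not in seen:              # keep macros reaching diverse end offsets
            seen.add(end)
            macros.append(m)
        if len(seen) == length + 1:      # only length+1 distinct offsets exist
            break
    return macros

def best_macro(particles, macros, goal=5):
    # expected distance-to-goal over the particle belief, per macro action
    def value(m):
        return -sum(abs(p + sum(m) - goal) for p in particles) / len(particles)
    return max(macros, key=value)

belief = [0, 1, 0, 2]                    # particle approximation of the state
print(best_macro(belief, sample_macros()))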
@article{orthey2024-review-sampling,
author = {Orthey, Andreas and Chamzas, Constantinos and Kavraki, Lydia E.},
title = {Sampling-Based Motion Planning: A Comparative Review},
journal = {Annual Review of Control, Robotics, and Autonomous Systems},
volume = {7},
number = {1},
pages = {285--310},
year = {2024},
doi = {10.1146/annurev-control-061623-094742},
url = {https://doi.org/10.1146/annurev-control-061623-094742},
month = jul,
abstract = {Sampling-based motion planning is one of the fundamental paradigms to generate robot
motions, and a cornerstone of robotics research. This comparative review provides an
up-to-date guide and reference manual for the use of sampling-based motion planning
algorithms. It includes a history of motion planning, an overview of the most successful
planners, and a discussion of their properties. It also shows how planners can handle
special cases and how extensions of motion planning can be accommodated. To put
sampling-based motion planning into a larger context, a discussion of alternative motion
generation frameworks highlights their respective differences from sampling-based motion
planning. Finally, a set of sampling-based motion planners are compared on 24 challenging
planning problems in order to provide insights into which planners perform well in which
situations and where future research would be required. This comparative review thereby
provides not only a useful reference manual for researchers in the field but also a guide
for practitioners to make informed algorithmic decisions.}
}
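As a companion to the review above, a bare-bones RRT illustrates the core loop shared by the planners it surveys: sample, find the nearest node, steer, and keep collision-free states. This toy 2-D point-robot version is illustrative only, not one of the reviewed implementations.

import math, random

def rrt(start, goal, is_free, step=0.05, iters=5000, goal_bias=0.05):
    nodes, parent = [start], {0: None}
    for _ in range(iters):
        target = goal if random.random() < goal_bias else (random.random(), random.random())
        i = min(range(len(nodes)), key=lambda k: dist(nodes[k], target))
        new = steer(nodes[i], target, step)
        if is_free(new):
            nodes.append(new)
            parent[len(nodes) - 1] = i
            if dist(new, goal) < step:
                return trace(nodes, parent, len(nodes) - 1)
    return None

def dist(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

def steer(a, b, step):
    d = dist(a, b)
    t = min(1.0, step / d) if d > 0 else 0.0
    return (a[0] + t * (b[0] - a[0]), a[1] + t * (b[1] - a[1]))

def trace(nodes, parent, i):
    path = []
    while i is not None:
        path.append(nodes[i])
        i = parent[i]
    return path[::-1]

# free space: unit square minus a disk of radius 0.2 at the center
path = rrt((0.1, 0.1), (0.9, 0.9), lambda q: dist(q, (0.5, 0.5)) > 0.2)
print(path is not None)   # almost surely True with this budget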
@inproceedings{muvvala2024games,
author = {Muvvala, Karan and Wells, Andrew M. and Lahijanian, Morteza and Kavraki, Lydia E. and Vardi, Moshe Y.},
title = {Stochastic Games for Interactive Manipulation Domains},
year = {2024},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
abstract = {As robots become more prevalent, the complexity of robot-robot, robot-human, and
robot-environment interactions increases. In these interactions, a robot needs to consider
not only the effects of its own actions, but also the effects of other agents’ actions and
the possible interactions between agents. Previous works have considered reactive synthesis,
where the human/environment is modeled as a deterministic, adversarial agent; as well as
probabilistic synthesis, where the human/environment is modeled via a Markov chain. While
they provide strong theoretical frameworks, there are still many aspects of human-robot
interaction that cannot be fully expressed and many assumptions that must be made in each
model. In this work, we propose stochastic games as a general model for human-robot
interaction, which subsumes the expressivity of all previous representations. In addition,
it allows us to make fewer modeling assumptions and leads to more natural and powerful
models of interaction. We introduce the semantics of this abstraction and show how existing
tools can be utilized to synthesize strategies to achieve complex tasks with guarantees.
Further, we discuss the current computational limitations and improve the scalability by two
orders of magnitude through a new way of constructing models for PRISM-games.},
doi = {10.1109/ICRA57147.2024.10611623},
pages = {2513--2519},
url = {https://ieeexplore.ieee.org/document/10611623},
keyword = {Computational modeling, Scalability, Semantics, Stochastic processes, Human-robot
interaction, Games, Probabilistic logic}
}
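A sketch of how strategies can be computed on the stochastic-game abstraction the paper above adopts: value iteration on a tiny turn-based game where the robot maximizes and the environment minimizes, with chance resolving transitions. The toy model below is ours, not the paper's PRISM-games encoding.

def sg_value_iteration(states, owner, actions, trans, reward, gamma=0.95, eps=1e-6):
    """owner[s] in {'robot', 'env'}; trans[(s, a)] = [(prob, next_state), ...]."""
    V = {s: 0.0 for s in states}
    while True:
        delta = 0.0
        for s in states:
            vals = [reward.get((s, a), 0.0)
                    + gamma * sum(p * V[t] for p, t in trans[(s, a)])
                    for a in actions[s]]
            new = max(vals) if owner[s] == 'robot' else min(vals)
            delta = max(delta, abs(new - V[s]))
            V[s] = new
        if delta < eps:
            return V

states = ['s0', 's1']
owner = {'s0': 'robot', 's1': 'env'}
actions = {'s0': ['go'], 's1': ['block', 'yield']}
trans = {('s0', 'go'): [(1.0, 's1')],
         ('s1', 'block'): [(1.0, 's0')],
         ('s1', 'yield'): [(1.0, 's1')]}
reward = {('s1', 'yield'): 1.0}
# the adversarial environment denies the reward by always blocking,
# which is exactly the worst-case semantics a synthesized strategy must respect
print(sg_value_iteration(states, owner, actions, trans, reward))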
@inproceedings{thomason2024vamp,
author = {Thomason, Wil and Kingston, Zachary and Kavraki, Lydia E.},
title = {Motions in Microseconds via Vectorized Sampling-Based Planning},
year = {2024},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
abstract = {Modern sampling-based motion planning algorithms typically take between hundreds of
milliseconds to dozens of seconds to find collision-free motions for high degree-of-freedom
problems. This paper presents performance improvements of more than 500x over the
state-of-the-art, bringing planning times into the range of microseconds and solution rates
into the range of kilohertz, without specialized hardware. Our key insight is how to exploit
fine-grained parallelism within sampling-based planners, providing generality-preserving
algorithmic improvements to any such planner and significantly accelerating critical
subroutines, such as forward kinematics and collision checking. We demonstrate our approach
over a diverse set of challenging, realistic problems for complex robots ranging from 7 to
14 degrees-of-freedom. Moreover, we show that our approach does not require high-power
hardware by also evaluating on a low-power single-board computer. The planning speeds
demonstrated are fast enough to reside in the range of control frequencies and open up new
avenues of motion planning research.},
pages = {8749--8756},
doi = {10.1109/ICRA57147.2024.10611190},
url = {https://ieeexplore.ieee.org/document/10611190}
}
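The paper's key insight, fine-grained parallelism inside the planner, can be hinted at with NumPy broadcasting: validate an entire interpolated motion against all obstacles in one batched operation instead of a per-state loop. A toy stand-in for the paper's SIMD kernels, with a point robot so that a configuration is just a workspace point.

import numpy as np

def motion_is_free(q_start, q_end, centers, radii, resolution=64):
    # (resolution, dof) matrix of interpolated configurations
    ts = np.linspace(0.0, 1.0, resolution)[:, None]
    qs = (1.0 - ts) * q_start + ts * q_end
    # broadcast: distance of every configuration to every obstacle at once
    d = np.linalg.norm(qs[:, None, :] - centers[None, :, :], axis=2)
    return bool(np.all(d > radii[None, :]))

q0, q1 = np.zeros(3), np.ones(3)
obs = np.array([[0.5, 0.5, 2.0]])
rad = np.array([0.3])
print(motion_is_free(q0, q1, obs, rad))   # True: the obstacle is far from the segment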
@inproceedings{quintero2024impdist,
author = {Quintero-Pe{\~n}a, Carlos and Thomason, Wil and Kingston, Zachary and Kyrillidis, Anastasios and Kavraki, Lydia E.},
title = {Stochastic Implicit Neural Signed Distance Functions for Safe Motion Planning under
Sensing Uncertainty},
year = {2024},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
abstract = {Motion planning under sensing uncertainty is critical for robots in unstructured
environments to guarantee safety for both the robot and any nearby humans. Most work on
planning under uncertainty does not scale to high-dimensional robots such as manipulators,
assumes simplified geometry of the robot or environment, or requires per-object knowledge of
noise. Instead, we propose a method that directly models sensor-specific aleatoric
uncertainty to find safe motions for high-dimensional systems in complex environments,
without exact knowledge of environment geometry. We combine a novel implicit neural model of
stochastic signed distance functions with a hierarchical optimization-based motion planner
to plan low-risk motions without sacrificing path quality. Our method also explicitly bounds
the risk of the path, offering trustworthiness. We empirically validate that our method
produces safe motions and accurate risk bounds and is safer than baseline approaches.},
pages = {2360--2367},
doi = {10.1109/ICRA57147.2024.10610773},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10610773}
}
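A hedged sketch of the chance-constraint check the abstract above describes: samples from a stochastic signed distance function give an empirical collision probability to compare against a risk budget. The Gaussian clearance model below is a placeholder assumption, not the paper's learned model.

import numpy as np

rng = np.random.default_rng(0)

def sampled_sdf(q, n=256):
    # placeholder: true clearance 0.10 m with 0.05 m aleatoric sensing noise;
    # the configuration q is unused by this stub
    return rng.normal(loc=0.10, scale=0.05, size=n)

def config_is_safe(q, risk_budget=0.05):
    samples = sampled_sdf(q)
    p_collision = float(np.mean(samples <= 0.0))   # empirical P(sdf <= 0)
    return p_collision <= risk_budget

print(config_is_safe([0.3, 0.5]))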
@inproceedings{quintero2023-optimal-tmp,
title = {Optimal Grasps and Placements for Task and Motion Planning in Clutter},
author = {Quintero-Pe{\~n}a, Carlos and Kingston, Zachary and Pan, Tianyang and Shome, Rahul and Kyrillidis, Anastasios and Kavraki, Lydia E.},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2023},
pages = {3707--3713},
doi = {10.1109/ICRA48891.2023.10161455},
month = may,
abstract = {Many methods that solve robot planning problems, such as task and motion planners,
employ discrete symbolic search to find sequences of valid symbolic actions that are
grounded with motion planning. Much of the efficacy of these planners lies in this
grounding—bad placement and grasp choices can lead to inefficient planning when a problem
has many geometric constraints. Moreover, grounding methods such as na\"{i}ve sampling often
fail to find appropriate values for these choices in the presence of clutter. Towards
efficient task and motion planning, we present a novel optimization-based approach for
grounding to solve cluttered problems that have many constraints that arise from geometry.
Our approach finds an optimal grounding and can provide feedback to discrete search for more
effective planning. We demonstrate our method against baseline methods in complex simulated
environments.}
}
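The grounding idea above, reduced to a toy: score candidate object placements with simple reachability and clearance terms and take the optimum, rather than sampling naively. The cost terms are illustrative, not the paper's formulation.

import numpy as np

def best_placement(candidates, robot_base, clutter, w_reach=1.0, w_clear=2.0):
    candidates, clutter = np.asarray(candidates), np.asarray(clutter)
    reach = np.linalg.norm(candidates - robot_base, axis=1)            # prefer near the robot
    clear = np.min(np.linalg.norm(
        candidates[:, None, :] - clutter[None, :, :], axis=2), axis=1)
    cost = w_reach * reach - w_clear * clear                           # prefer far from clutter
    return candidates[np.argmin(cost)]

# picks the cheaper of the two candidate placements
print(best_placement([[0.4, 0.0], [0.6, 0.3]], np.array([0.0, 0.0]),
                     [[0.45, 0.05], [0.55, 0.25]]))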
@inproceedings{ren2023-kinodynamic,
author = {Ren, Kejia and Chanrungmaneekul, Podshara and Kavraki, Lydia E. and Hang, Kaiyu},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
title = {Kinodynamic Rapidly-exploring Random Forest for Rearrangement-Based Nonprehensile
Manipulation},
year = {2023},
month = may,
pages = {8127--8133},
doi = {10.1109/ICRA48891.2023.10161560},
abstract = {Rearrangement-based nonprehensile manipulation remains a challenging
problem due to the high-dimensional problem space and the complex physical uncertainties it
entails. We formulate this class of problems as a coupled problem of local rearrangement and
global action optimization by incorporating free-space transit motions between constrained
rearranging actions. We propose a forest-based kinodynamic planning framework to
concurrently search in multiple problem regions, so as to enable global exploration of the
most task-relevant subspaces, while facilitating effective switches between local
rearranging actions. By interleaving dynamic horizon planning and action execution, our
framework can adaptively handle real-world uncertainties. With extensive experiments, we
show that our framework significantly improves the planning efficiency and manipulation
effectiveness while being robust against various uncertainties.}
}
@inproceedings{sobti2023-temporal-task,
title = {Efficient Inference of Temporal Task Specifications from Human Demonstrations using
Experiment Design},
author = {Sobti, Shlok and Shome, Rahul and Kavraki, Lydia E.},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2023},
pages = {9764--9770},
doi = {10.1109/ICRA48891.2023.10160692},
month = may,
abstract = {Robotic deployments in human environments have motivated the need for autonomous
systems to be able to interact with humans and solve tasks effectively. Human demonstrations
of tasks can be used to infer underlying task specifications, commonly modeled with temporal
logic. State-of-the-art methods have developed Bayesian inference tools to estimate a
temporal logic formula from a sequence of demonstrations. The current work proposes the use
of experiment design to choose environments for humans to perform these demonstrations. This
reduces the number of demonstrations needed to estimate the unknown ground truth formula
with low error. A novel computationally efficient strategy is proposed to generate
informative environments by using an optimal planner as the model for the demonstrator.
Instead of evaluating all possible environments, the search space reduces to the placement
of informative orderings of likely eventual goals along an optimal planner’s solution. A
human study with 600 demonstrations from 20 participants for 4 tasks on a 2D interface
validates the proposed hypothesis and empirical performance benefit in terms of convergence
and error over baselines. The human study dataset is also publicly shared.}
}
@inproceedings{lee2023-simulation-actions,
title = {Object Reconfiguration with Simulation-Derived Feasible Actions},
author = {Lee, Yiyuan and Thomason, Wil and Kingston, Zachary and Kavraki, Lydia E.},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2023},
pages = {8104--8111},
doi = {10.1109/ICRA48891.2023.10160377},
month = may,
abstract = {3D object reconfiguration encompasses common robot manipulation tasks in which a set
of objects must be moved through a series of physically feasible state changes into a
desired final configuration. Object reconfiguration is challenging to solve in general, as
it requires efficient reasoning about environment physics that determine action validity.
This information is typically manually encoded in an explicit transition system.
Constructing these explicit encodings is tedious and error-prone, and is often a bottleneck
for planner use. In this work, we explore embedding a physics simulator within a motion
planner to implicitly discover and specify the valid actions from any state, removing the
need for manual specification of action semantics. Our experiments demonstrate that the
resulting simulation-based planner can effectively produce physically valid rearrangement
trajectories for a range of 3D object reconfiguration problems without requiring more than
an environment description and start and goal arrangements.}
}
@article{kingston2022-scaling-mmp,
author = {Kingston, Zachary and Kavraki, Lydia E.},
journal = {IEEE Transactions on Robotics},
title = {Scaling Multimodal Planning: Using Experience and Informing Discrete Search},
month = feb,
year = {2023},
volume = {39},
number = {1},
pages = {128--146},
doi = {10.1109/TRO.2022.3197080},
abstract = {Robotic manipulation is inherently continuous, but typically has an underlying
discrete structure, such as if an object is grasped. Many problems like these are
multi-modal, such as pick-and-place tasks where every object grasp and placement is a mode.
Multi-modal problems require finding a sequence of transitions between modes - for example,
a particular sequence of object picks and placements. However, many multi-modal planners
fail to scale when motion planning is difficult (e.g., in clutter) or the task has a long
horizon (e.g., rearrangement). This work presents solutions for multi-modal scalability in
both these areas. For motion planning, we present an experience-based planning framework
ALEF which reuses experience from similar modes both online and from training data. For task
satisfaction, we present a layered planning approach that uses a discrete lead to bias
search towards useful mode transitions, informed by weights over mode transitions. Together,
these contributions enable multi-modal planners to tackle complex manipulation tasks that
were previously infeasible or inefficient, and provide significant improvements in scenes
with high-dimensional robots.},
keyword = {fundamentals of sampling-based motion planning}
}
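The experience-reuse half of the paper above (the ALEF framework) in miniature: index solved mode transitions by a feature vector and retrieve the nearest prior path as a seed for a new query. The features and retrieval rule below are illustrative stand-ins.

import numpy as np

class ExperienceDB:
    def __init__(self):
        self.features, self.paths = [], []

    def add(self, feature, path):
        self.features.append(np.asarray(feature, dtype=float))
        self.paths.append(path)

    def retrieve(self, feature):
        if not self.features:
            return None
        d = [np.linalg.norm(f - feature) for f in self.features]
        return self.paths[int(np.argmin(d))]     # seed for the new query

db = ExperienceDB()
db.add([0.2, 0.1], ["q_a", "q_b", "q_c"])        # a previously solved transition
print(db.retrieve(np.array([0.25, 0.12])))       # reused to bias the new search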
@inproceedings{shome2023privacy,
author = {Shome, Rahul and Kingston, Zachary and Kavraki, Lydia E.},
title = {Robots as {AI} Double Agents: Privacy in Motion Planning},
booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year = {2023},
pages = {2861--2868},
abstract = {Robotics and automation are poised to change the landscape of home and work in the
near future. Robots are adept at deliberately moving, sensing, and interacting with their
environments. The pervasive use of this technology promises societal and economic payoffs
due to its capabilities - conversely, the capabilities of robots to move within and sense
the world around them are susceptible to abuse. Robots, unlike typical sensors, are
inherently autonomous, active, and deliberate. Such automated agents can become AI double
agents liable to violate the privacy of coworkers, privileged spaces, and other
stakeholders. In this work we highlight the understudied and inevitable threats to privacy
that can be posed by the autonomous, deliberate motions and sensing of robots. We frame the
problem within broader sociotechnological questions alongside a comprehensive review. The
privacy-aware motion planning problem is formulated in terms of cost functions that can be
modified to induce privacy-aware behavior - preserving, agnostic, or violating. Simulated
case studies in manipulation and navigation, with altered cost functions, are used to
demonstrate how privacy-violating threats can be easily injected, sometimes with only small
changes in performance (solution path lengths). Such functionality is already widely
available. This preliminary work is meant to lay the foundations for near-future, holistic,
interdisciplinary investigations that can address questions surrounding privacy in
intelligent robotic behaviors determined by planning algorithms.},
url = {https://ieeexplore.ieee.org/document/10341460},
doi = {10.1109/IROS55552.2023.10341460}
}
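The paper's cost-function formulation in miniature: augment path length with an exposure term whose weight makes the planned motion privacy-preserving, agnostic, or violating. The exposure model below is a toy.

import numpy as np

def path_cost(path, sensitive_region, lam):
    path = np.asarray(path, dtype=float)
    length = np.sum(np.linalg.norm(np.diff(path, axis=0), axis=1))
    exposure = np.sum(np.linalg.norm(path - sensitive_region, axis=1) < 1.0)
    return length + lam * exposure   # lam > 0 preserving, 0 agnostic, < 0 violating

path = [[0, 0], [1, 0], [2, 0]]
print(path_cost(path, np.array([1.0, 0.5]), lam=+5.0))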
@article{bayraktar2023-rearrangement,
author = {Bayraktar, Servet B. and Orthey, Andreas and Kingston, Zachary and Toussaint, Marc and Kavraki, Lydia E.},
journal = {IEEE Robotics and Automation Letters},
title = {Solving Rearrangement Puzzles using Path Defragmentation in Factored State Spaces},
year = {2023},
pages = {1--8},
doi = {10.1109/LRA.2023.3282788},
abstract = {Rearrangement puzzles are variations of rearrangement problems in which the elements
of a problem are potentially logically linked together. To efficiently solve such puzzles,
we develop a motion planning approach based on a new state space that is logically factored,
integrating the capabilities of the robot through factors of simultaneously manipulatable
joints of an object. Based on this factored state space, we propose less-actions RRT
(LA-RRT), a planner which optimizes for a low number of actions to solve a puzzle. At the
core of our approach lies a new path defragmentation method, which rearranges and optimizes
consecutive edges to minimize action cost. We solve six rearrangement scenarios with a Fetch
robot, involving planar table puzzles and an escape room scenario. LA-RRT significantly
outperforms the next best asymptotically optimal planner, improving final action cost by a
factor of 4.01 to 6.58.}
}
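The path-defragmentation idea above in miniature: an "action" starts whenever consecutive path edges switch which factor (manipulable joint) they move, so regrouping edges by factor lowers the action cost. The toy below assumes edges commute, which the real planner must verify.

def action_cost(edges):                  # edges: list of (factor, motion)
    return sum(1 for a, b in zip(edges, edges[1:]) if a[0] != b[0]) + bool(edges)

def defragment(edges):
    order = list(dict.fromkeys(f for f, _ in edges))       # first-seen factor order
    return [e for f in order for e in edges if e[0] == f]

path = [("drawer", 0.1), ("door", 0.2), ("drawer", 0.3), ("door", 0.1)]
print(action_cost(path), action_cost(defragment(path)))    # 4 -> 2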
@article{verginis2022-kdf,
author = {Verginis, Christos K. and Dimarogonas, Dimos V. and Kavraki, Lydia E.},
abstract = {We integrate sampling-based planning techniques with funnel-based feedback control
to develop KDF, a new framework for solving the kinodynamic motion-planning problem via
funnel control. The considered systems evolve subject to complex, nonlinear, and uncertain
dynamics (also known as differential constraints). First, we use a geometric planner to
obtain a high-level safe path in a user-defined extended free space. Second, we develop a
low-level funnel control algorithm that guarantees safe tracking of the path by the system.
Neither the planner nor the control algorithm uses information on the underlying dynamics of
the system, which makes the proposed scheme easily distributable to a large variety of
different systems and scenarios. Intuitively, the funnel control module is able to
implicitly accommodate the dynamics of the system, hence allowing the deployment of purely
geometrical motion planners. Extensive computer simulations and hardware experiments with a
6-DOF robotic arm validate the proposed approach.},
journal = {IEEE Transactions on Robotics},
title = {KDF: Kinodynamic Motion Planning via Geometric Sampling-Based Algorithms and Funnel
Control},
year = {2023},
volume = {39},
number = {2},
pages = {978--997},
doi = {10.1109/TRO.2022.3208502}
}
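A one-dimensional illustration of the funnel-control idea above: a prescribed-performance law keeps the tracking error inside a shrinking funnel rho(t) without a model of the disturbance, because the control grows unbounded near the funnel boundary. Single-integrator dynamics and all gains are illustrative choices, not the paper's 6-DOF experiments.

import math

def funnel(t, rho0=1.0, rho_inf=0.05, decay=1.0):
    return (rho0 - rho_inf) * math.exp(-decay * t) + rho_inf

x, x_ref, dt, k = 0.8, 0.0, 0.001, 2.0
for step in range(5000):
    t = step * dt
    e = x - x_ref
    xi = max(min(e / funnel(t), 0.999), -0.999)     # normalized error in (-1, 1)
    u = -k * math.log((1 + xi) / (1 - xi))          # blows up near the funnel edge
    x += dt * (u + 0.2 * math.sin(5 * t))           # unknown bounded disturbance
print(abs(x - x_ref) < funnel(5.0))                 # error ends inside the funnel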
@inproceedings{kingston2022-robowflex,
abstract = {Robowflex is a software library for robot motion planning in industrial and research
applications, leveraging the popular MoveIt library and Robot Operating System (ROS)
middleware. Robowflex takes advantage of the ease of motion planning with MoveIt while
providing an augmented API to craft and manipulate motion planning queries within a single
program. Robowflex's high-level API simplifies many common use-cases while still providing
access to the underlying MoveIt library. Robowflex is particularly useful for 1) developing
new motion planners, 2) evaluating motion planners, and 3) solving complex problems that use
motion planning (e.g., task and motion planning). Robowflex also provides visualization
capabilities, integrations with other robotics libraries (e.g., DART and Tesseract), and is
complementary to many other robotics packages. With our library, the user does not need to
be an expert at ROS or MoveIt in order to set up motion planning queries, extract
information from results, and directly interface with a variety of software components. We
provide a few example use-cases that demonstrate its efficacy.},
author = {Kingston, Zachary and Kavraki, Lydia E.},
booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
title = {Robowflex: Robot Motion Planning with MoveIt Made Easy},
year = {2022},
month = oct,
pages = {3108--3114},
doi = {10.1109/IROS47612.2022.9981698}
}
@inproceedings{ren2022-rearrangement,
abstract = {Robot manipulation in cluttered environments often requires complex and sequential
rearrangement of multiple objects in order to achieve the desired reconfiguration of the
target objects. Due to the sophisticated physical interactions involved in such scenarios,
rearrangement-based manipulation is still limited to a small range of tasks and is
especially vulnerable to physical uncertainties and perception noise. This paper presents a
planning framework that leverages the efficiency of sampling-based planning approaches, and
closes the manipulation loop by dynamically controlling the planning horizon. Our approach
interleaves planning and execution to progressively approach the manipulation goal while
correcting any errors or path deviations along the process. Meanwhile, our framework allows
the definition of manipulation goals without requiring explicit goal configurations,
enabling the robot to flexibly interact with all objects to facilitate the manipulation of
the target ones. With extensive experiments both in simulation and on a real robot, we
evaluate our framework on three manipulation tasks in cluttered environments: grasping,
relocating, and sorting. In comparison with two baseline approaches, we show that our
framework can significantly improve planning efficiency, robustness against physical
uncertainties, and task success rate under limited time budgets.},
author = {Ren, Kejia and Kavraki, Lydia E. and Hang, Kaiyu},
booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
title = {Rearrangement-Based Manipulation via Kinodynamic Planning and Dynamic Planning
Horizons},
year = {2022},
month = oct,
pages = {1145--1152},
doi = {10.1109/IROS47612.2022.9981599}
}
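The interleaved plan-execute loop with a dynamic horizon described above, in skeleton form: plan over a short receding horizon, execute only the first action, observe, and replan, shrinking the horizon as the goal nears. The planner and executor below are stubs.

def plan(state, goal, horizon):
    # stub: move one unit toward the goal per action
    step = 1 if goal > state else -1
    return [step] * min(horizon, abs(goal - state))

def execute(state, action, noise=0):
    return state + action + noise          # a real system would add physics here

state, goal, horizon = 0, 7, 4
while state != goal:
    actions = plan(state, goal, horizon)
    state = execute(state, actions[0])     # execute only the first action, then replan
    horizon = max(1, min(horizon, abs(goal - state)))
print("reached", state)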
@inproceedings{chamzas2022-contrastive-visual-task-planning,
title = {Comparing Reconstruction- and Contrastive-based Models for Visual Task Planning},
author = {Chamzas, Constantinos and Lippi, Martina and Welle, Michael C. and Varava, Anastasia and Kavraki, Lydia E. and Kragic, Danica},
booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = oct,
year = {2022},
pages = {12550--12557},
doi = {10.1109/IROS47612.2022.9981533},
abstract = {Learning state representations enables robotic planning directly from raw
observations such as images. Most methods learn state representations by utilizing losses
based on the reconstruction of the raw observations from a lower-dimensional latent space.
The similarity between observations in the space of images is often assumed and used as a
proxy for estimating similarity between the underlying states of the system. However,
observations commonly contain task-irrelevant factors of variation which are nonetheless
important for reconstruction, such as varying lighting and different camera viewpoints. In
this work, we define relevant evaluation metrics and perform a thorough study of different
loss functions for state representation learning. We show that models exploiting task
priors, such as Siamese networks with a simple contrastive loss, outperform
reconstruction-based representations in visual task planning.},
keyword = {fundamentals of sampling-based motion planning}
}
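The contrastive objective the study above favors, written out in NumPy: pull embeddings of same-state observation pairs together and push different-state pairs apart by a margin. The embeddings below stand in for the Siamese network's outputs.

import numpy as np

def contrastive_loss(z1, z2, same_state, margin=1.0):
    d = np.linalg.norm(z1 - z2, axis=1)
    pos = same_state * d ** 2                                   # attract positives
    neg = (1 - same_state) * np.maximum(0.0, margin - d) ** 2   # repel negatives
    return float(np.mean(pos + neg))

z1 = np.array([[0.1, 0.2], [0.9, 0.8]])
z2 = np.array([[0.12, 0.19], [0.1, 0.2]])
labels = np.array([1, 0])    # first pair: same state; second: different states
print(contrastive_loss(z1, z2, labels))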
@article{lee2022-apes,
title = {Adaptive Experience Sampling for Motion Planning using the Generator-Critic Framework},
author = {Lee, Yiyuan and Chamzas, Constantinos and Kavraki, Lydia E.},
journal = {IEEE Robotics and Automation Letters},
volume = {7},
number = {4},
month = jul,
year = {2022},
pages = {9437--9444},
doi = {10.1109/LRA.2022.3191803},
abstract = {Sampling-based motion planners are widely used for motion planning with high-dof
robots. These planners generally rely on a uniform distribution to explore the search space.
Recent work has explored learning biased sampling distributions to improve the time
efficiency of these planners. However, learning such distributions is challenging, since
there is no direct connection between the choice of distributions and the performance of the
downstream planner. To alleviate this challenge, this paper proposes APES, a framework that
learns sampling distributions optimized directly for the planner's performance. This is done
using a critic, which serves as a differentiable surrogate objective modeling the planner's
performance - thus allowing gradients to circumvent the non-differentiable planner.
Leveraging the differentiability of the critic, we train a generator, which outputs sampling
distributions optimized for the given problem instance. We evaluate APES on a series of
realistic and challenging high-dof manipulation problems in simulation. Our experimental
results demonstrate that APES can learn high-quality distributions that improve planning
performance more than other biased sampling baselines.},
keyword = {fundamentals of sampling-based motion planning},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)}
}
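The generator-critic trick above in miniature: the planner's cost is not differentiable, so fit a smooth critic to (distribution parameter, observed cost) pairs and descend its gradient to tune the sampling distribution. The quadratic critic and scalar parameter are deliberate simplifications of APES.

import numpy as np

rng = np.random.default_rng(1)
planner_cost = lambda mu: (mu - 2.0) ** 2 + rng.normal(0, 0.1)   # black-box planner

mu = -1.0
for _ in range(30):
    trials = mu + rng.normal(0, 0.5, size=16)                    # probe around mu
    costs = np.array([planner_cost(m) for m in trials])
    A = np.stack([trials ** 2, trials, np.ones_like(trials)], axis=1)
    a, b, _ = np.linalg.lstsq(A, costs, rcond=None)[0]           # critic: a*mu^2 + b*mu + c
    mu -= 0.2 * (2 * a * mu + b)                                 # gradient through the critic
print(round(mu, 2))   # approaches ~2.0, the best sampling parameter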
@inproceedings{quintero-chamzas2022-blind,
title = {Human-Guided Motion Planning in Partially Observable Environments},
author = {Quintero-Pe{\~n}a, Carlos and Chamzas, Constantinos and Sun, Zhanyi and Unhelkar, Vaibhav and Kavraki, Lydia E.},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
month = may,
year = {2022},
pages = {7226--7232},
doi = {10.1109/ICRA46639.2022.9811893},
abstract = {Motion planning is a core problem in robotics, with a range of existing methods
aimed to address its diverse set of challenges. However, most existing methods rely on
complete knowledge of the robot environment; an assumption that seldom holds true due to
inherent limitations of robot perception. To enable tractable motion planning for high-DOF
robots under partial observability, we introduce BLIND, an algorithm that leverages human
guidance. BLIND utilizes inverse reinforcement learning to derive motion-level guidance from
human critiques. The algorithm overcomes the computational challenge of reward learning for
high-DOF robots by projecting the robot’s continuous configuration space to a
motion-planner-guided discrete task model. The learned reward is in turn used as guidance to
generate robot motion using a novel motion planner. We demonstrate BLIND using the Fetch
robot and perform two simulation experiments with partial observability. Our experiments
demonstrate that, despite the challenge of partial observability and high dimensionality,
BLIND is capable of generating safe robot motion and outperforms baselines on metrics of
teaching efficiency, success rate, and path quality.},
keyword = {uncertainty},
publisher = {IEEE}
}
@inproceedings{pan2022failing-execution,
title = {Failure is an option: Task and Motion Planning with Failing Executions},
author = {Pan, Tianyang and Wells, Andrew M. and Shome, Rahul and Kavraki, Lydia E.},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
month = may,
year = {2022},
pages = {1947--1953},
doi = {10.1109/ICRA46639.2022.9812273},
abstract = {Future robotic deployments will require robots to be able to repeatedly solve a
variety of tasks in application domains. Task and motion planning addresses complex robotic
problems that combine discrete reasoning over states and actions and geometric interactions
during action executions. Moving beyond deterministic settings, stochastic actions can be
handled by modeling the problem as a Markov Decision Process. The underlying probabilities
however are typically hard to model since failures might be caused by hardware
imperfections, sensing noise, or physical interactions. We propose a framework to address a
task and motion planning setting where actions can fail during execution. To achieve a task
goal actions need to be computed and executed despite failures. The robot has to infer which
actions are robust and for each new problem effectively choose a solution that reduces
expected execution failures. The key idea is to continually recover and refine the
underlying beliefs associated with actions across multiple different problems in the domain.
Our proposed method can find solutions that reduce the expected number of discrete, executed
actions. Results in physics-based simulation indicate that our method outperforms baseline
replanning strategies to deal with failing executions.},
keyword = {task and motion planning},
publisher = {IEEE}
}
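A minimal version of the belief recovery described above: track each action's unknown success probability with a conjugate Beta posterior, update it across executions, and prefer the action with the highest expected success. The Beta-Bernoulli update is standard; the domain is a toy.

beliefs = {"grasp_top": [1, 1], "grasp_side": [1, 1]}   # Beta(alpha, beta) priors

def record(action, succeeded):
    beliefs[action][0 if succeeded else 1] += 1          # conjugate posterior update

def expected_success(action):
    a, b = beliefs[action]
    return a / (a + b)                                   # posterior mean of Beta(a, b)

record("grasp_top", False)
record("grasp_top", False)
record("grasp_side", True)
best = max(beliefs, key=expected_success)
print(best, round(expected_success(best), 2))            # grasp_side 0.67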
@article{chamzas2022-motion-bench-maker,
title = {MotionBenchMaker: A Tool to Generate and Benchmark Motion Planning Datasets},
author = {Chamzas, Constantinos and Quintero-Pe{\~n}a, Carlos and Kingston, Zachary and Orthey, Andreas and Rakita, Daniel and Gleicher, Michael and Toussaint, Marc and Kavraki, Lydia E.},
journal = {IEEE Robotics and Automation Letters},
month = apr,
year = {2022},
volume = {7},
number = {2},
pages = {882--889},
doi = {10.1109/LRA.2021.3133603},
abstract = {Recently, there has been a wealth of development in motion planning for robotic
manipulation; new motion planners are continuously proposed, each with its own unique set of
strengths and weaknesses. However, evaluating these new planners is challenging, and
researchers often create their own ad-hoc problems for benchmarking, which is
time-consuming, prone to bias, and does not directly compare against other state-of-the-art
planners. We present MotionBenchMaker, an open-source tool to generate benchmarking datasets
for realistic robot manipulation problems. MotionBenchMaker is designed to be an extensible,
easy-to-use tool that allows users to both generate datasets and benchmark them by comparing
motion planning algorithms. Empirically, we show the benefit of using MotionBenchMaker as a
tool to procedurally generate datasets which helps in the fair evaluation of planners. We
also present a suite of over 40 prefabricated datasets, with 5 different commonly used
robots in 8 environments, to serve as a common ground for future motion planning research.},
issn = {2377-3766},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
url = {https://dx.doi.org/10.1109/LRA.2021.3133603}
}
@inproceedings{sobti2021-complex-motor-actions,
title = {A Sampling-based Motion Planning Framework for Complex Motor Actions},
author = {Sobti, Shlok and Shome, Rahul and Chaudhuri, Swarat and Kavraki, Lydia E.},
booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = sep,
year = {2021},
pages = {6928--6934},
doi = {10.1109/IROS51168.2021.9636395},
abstract = {We present a framework for planning complex motor actions such as pouring or
scooping from arbitrary start states in cluttered real-world scenes. Traditional approaches
to such tasks use dynamic motion primitives (DMPs) learned from human demonstrations. We
enhance a recently proposed state-of-the-art DMP technique capable of obstacle avoidance by
including it within a novel hybrid framework. This complements DMPs with sampling-based
motion planning algorithms, using the latter to explore the scene and reach promising
regions from which a DMP can successfully complete the task. Experiments indicate that even
obstacle-aware DMPs suffer in task success when used in scenarios which largely differ from
the trained demonstration in terms of the start, goal, and obstacles. Our hybrid approach
significantly outperforms obstacle-aware DMPs by successfully completing tasks in cluttered
scenes for a pouring task in simulation. We further demonstrate our method on a real robot
for pouring and scooping tasks.},
keyword = {Motion and Path Planning, Manipulation Planning, Learning from Demonstration}
}
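A bare-bones one-dimensional DMP rollout of the kind the hybrid framework above hands off to once the sampling-based planner has reached a promising region. The gains follow conventional critically-damped choices; the forcing term is arbitrary rather than learned from demonstration.

import math

def dmp_rollout(y0, goal, forcing, tau=1.0, dt=0.01, a_z=25.0, b_z=25.0 / 4):
    y, z, x, traj = y0, 0.0, 1.0, [y0]
    while x > 1e-3:
        f = forcing(x) * x * (goal - y0)            # forcing fades with phase x
        z += dt / tau * (a_z * (b_z * (goal - y) - z) + f)
        y += dt / tau * z
        x += dt / tau * (-2.0 * x)                  # canonical system: tau * x' = -2x
        traj.append(y)
    return traj

traj = dmp_rollout(0.0, 1.0, forcing=lambda x: 5.0 * math.sin(6.28 * x))
print(round(traj[-1], 3))   # converges near the goal, ~1.0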
@inproceedings{shome2021-bundle-of-edges,
title = {Asymptotically Optimal Kinodynamic Planning Using Bundles of Edges},
author = {Shome, Rahul and Kavraki, Lydia E.},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
month = jun,
year = {2021},
pages = {9988--9994},
doi = {10.1109/ICRA48506.2021.9560836},
abstract = {Using sampling to estimate the connectivity of high-dimensional configuration spaces
has been the theoretical underpinning for effective sampling-based motion planners. Typical
strategies either build a roadmap, or a tree as the underlying search structure that
connects sampled configurations, with a focus on guaranteeing completeness and optimality as
the number of samples tends to infinity. Roadmap-based planners allow preprocessing the
space, and can solve multiple kinematic motion planning problems, but need a steering
function to connect pairs of states. Such steering functions are difficult to define for
kinodynamic systems, and limit the applicability of roadmaps to motion planning problems
with dynamical systems. Recent advances in the analysis of single-query tree-based planners
have shown that forward search trees based on random propagations are asymptotically optimal.
The current work leverages these recent results and proposes a multi-query framework for
kinodynamic planning. Bundles of kinodynamic edges can be sampled to cover the state space
before the query arrives. Then, given a motion planning query, the connectivity of the state
space reachable from the start can be recovered from a forward search tree reasoning about a
local neighborhood of the edge bundle from each tree node. The work demonstrates
theoretically that considering any constant radial neighborhood during this process is
sufficient to guarantee asymptotic optimality. Experimental validation in five and twelve
dimensional simulated systems also highlights the ability of the proposed edge bundles to
express high-quality kinodynamic solutions. Our approach consistently finds higher quality
solutions compared to SST and RRT, often with faster initial solution times. The strategy
of sampling kinodynamic edges is demonstrated to be a promising new paradigm.},
keyword = {Motion Planning, Asymptotic Optimality, Kinodynamic Planning, Bundle Of Edges}
}
@inproceedings{quintero2021-robust-motion-planning,
title = {Robust Optimization-based Motion Planning for High-DOF Robots under Sensing
Uncertainty},
author = {Quintero-Pe{\~n}a, Carlos and Kyrillidis, Anastasios and Kavraki, Lydia E.},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
month = jun,
year = {2021},
pages = {9724--9730},
doi = {10.1109/ICRA48506.2021.9560917},
abstract = {Motion planning for high degree-of-freedom (DOF) robots is challenging, especially
when acting in complex environments under sensing uncertainty. While there is significant
work on how to plan under state uncertainty for low-DOF robots, existing methods cannot be
easily translated into the high-DOF case, due to the complex geometry of the robot's body
and its environment. In this paper, we present a method that enhances optimization-based
motion planners to produce robust trajectories for high-DOF robots for convex obstacles. Our
approach introduces robustness into planners that are based on sequential convex
programming: We reformulate each convex subproblem as a robust optimization problem that
``protects'' the solution against deviations due to sensing uncertainty. The parameters of
the robust problem are estimated by sampling from the distribution of noisy obstacles, and
performing a first-order approximation of the signed distance function. The original merit
function is updated to account for the new costs of the robust formulation at every step.
The effectiveness of our approach is demonstrated on two simulated experiments that involve
a full-body square robot that moves in randomly generated scenes and a 7-DOF Fetch robot
performing tabletop operations. The results show nearly zero probability of collision for a
reasonable range of the noise parameters for Gaussian and Uniform uncertainty.},
keyword = {uncertainty}
}
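The robustification step above in miniature: sample the noisy obstacle estimate and accept a waypoint only if it clears the worst sampled case of the signed distance. The paper instead linearizes the signed distance inside each convex subproblem; the 2-D point robot and Gaussian noise here are assumptions for illustration.

import numpy as np

rng = np.random.default_rng(2)

def robust_clearance(waypoint, obs_mean, obs_sigma, radius, n=500):
    centers = rng.normal(obs_mean, obs_sigma, size=(n, 2))    # sampled sensing noise
    sdf = np.linalg.norm(waypoint - centers, axis=1) - radius # signed distance to a disk
    return float(np.min(sdf))                                 # worst sampled case

wp = np.array([1.0, 1.0])
print(robust_clearance(wp, obs_mean=np.array([0.0, 0.0]),
                       obs_sigma=0.05, radius=0.5) > 0.0)     # robustly collision-free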
@article{pairet2021-path-planning-for-manipulation,
title = {Path Planning for Manipulation Using Experience-Driven Random Trees},
author = {Pairet, Eric and Chamzas, Constantinos and Petillot, Yvan R. and Kavraki, Lydia E.},
journal = {IEEE Robotics and Automation Letters},
month = apr,
year = {2021},
volume = {6},
number = {2},
pages = {3295--3302},
doi = {10.1109/lra.2021.3063063},
abstract = {Robotic systems may frequently come across similar manipulation planning problems
that result in similar motion plans. Instead of planning each problem from scratch, it is
preferable to leverage previously computed motion plans, i.e., experiences, to ease the
planning. Different approaches have been proposed to exploit prior information on novel task
instances. These methods, however, rely on a vast repertoire of experiences and fail when
none relates closely to the current problem. Thus, an open challenge is the ability to
generalise prior experiences to task instances that do not necessarily resemble the prior.
This work tackles the above challenge with the proposition that experiences are
"decomposable" and "malleable", i.e., parts of an experience are suitable to relevantly
explore the connectivity of the robot-task space even in non-experienced regions. Two new
planners result from this insight: experience-driven random trees (ERT) and its
bi-directional version ERTConnect. These planners adopt a tree sampling-based strategy that
incrementally extracts and modulates parts of a single path experience to compose a valid
motion plan. We demonstrate our method on task instances that significantly differ from the
prior experiences, and compare with related state-of-the-art experience-based planners.
While their repairing strategies fail to generalise priors of tens of experiences, our
planner, with a single experience, significantly outperforms them in both success rate and
planning time. Our planners are implemented and freely available in the Open Motion Planning
Library.},
issn = {2377-3774},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
url = {http://dx.doi.org/10.1109/LRA.2021.3063063}
}
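The "decomposable and malleable" insight above, reduced to one operation: peel the detour shape off a segment of a prior experience path and re-anchor it on a new start-goal chord to propose a candidate micro-experience. This affine morph is a simplification of the planners' actual modulation step.

import numpy as np

def morph_segment(experience, i, j, new_start, new_goal):
    seg = np.asarray(experience[i:j + 1], dtype=float)
    t = np.linspace(0.0, 1.0, len(seg))[:, None]
    straight = (1 - t) * seg[0] + t * seg[-1]          # chord of the old segment
    detour = seg - straight                            # shape carried by the experience
    new_chord = (1 - t) * np.asarray(new_start) + t * np.asarray(new_goal)
    return new_chord + detour                          # re-anchored candidate path

prior = [[0, 0], [0.5, 0.4], [1.0, 0.0]]
print(morph_segment(prior, 0, 2, [2.0, 2.0], [3.0, 2.0]))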