CINXE.COM
Search | arXiv e-print repository
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–22 of 22 results for author: <span class="mathjax">Hein, B</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Hein%2C+B">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Hein, B"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Hein%2C+B&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Hein, B"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14381">arXiv:2411.14381</a> <span> [<a href="https://arxiv.org/pdf/2411.14381">pdf</a>, <a href="https://arxiv.org/format/2411.14381">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> ETA-IK: Execution-Time-Aware Inverse Kinematics for Dual-Arm Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tang%2C+Y">Yucheng Tang</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xi Huang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+Y">Yongzhou Zhang</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+T">Tao Chen</a>, <a href="/search/cs?searchtype=author&query=Mamaev%2C+I">Ilshat Mamaev</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14381v1-abstract-short" style="display: inline;"> This paper presents ETA-IK, a novel Execution-Time-Aware Inverse Kinematics method tailored for dual-arm robotic systems. The primary goal is to optimize motion execution time by leveraging the redundancy of both arms, specifically in tasks where only the relative pose of the robots is constrained, such as dual-arm scanning of unknown objects. Unlike traditional inverse kinematics methods that use… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14381v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14381v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14381v1-abstract-full" style="display: none;"> This paper presents ETA-IK, a novel Execution-Time-Aware Inverse Kinematics method tailored for dual-arm robotic systems. The primary goal is to optimize motion execution time by leveraging the redundancy of both arms, specifically in tasks where only the relative pose of the robots is constrained, such as dual-arm scanning of unknown objects. Unlike traditional inverse kinematics methods that use surrogate metrics such as joint configuration distance, our method incorporates direct motion execution time and implicit collisions into the optimization process, thereby finding target joints that allow subsequent trajectory generation to get more efficient and collision-free motion. A neural network based execution time approximator is employed to predict time-efficient joint configurations while accounting for potential collisions. Through experimental evaluation on a system composed of a UR5 and a KUKA iiwa robot, we demonstrate significant reductions in execution time. The proposed method outperforms conventional approaches, showing improved motion efficiency without sacrificing positioning accuracy. These results highlight the potential of ETA-IK to improve the performance of dual-arm systems in applications, where efficiency and safety are paramount. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14381v1-abstract-full').style.display = 'none'; document.getElementById('2411.14381v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20279">arXiv:2410.20279</a> <span> [<a href="https://arxiv.org/pdf/2410.20279">pdf</a>, <a href="https://arxiv.org/format/2410.20279">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> HIRO: Heuristics Informed Robot Online Path Planning Using Pre-computed Deterministic Roadmaps </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xi Huang</a>, <a href="/search/cs?searchtype=author&query=S%C3%B3ti%2C+G">Gergely S贸ti</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+H">Hongyi Zhou</a>, <a href="/search/cs?searchtype=author&query=Ledermann%2C+C">Christoph Ledermann</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a>, <a href="/search/cs?searchtype=author&query=Kr%C3%B6ger%2C+T">Torsten Kr枚ger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20279v1-abstract-short" style="display: inline;"> With the goal of efficiently computing collision-free robot motion trajectories in dynamically changing environments, we present results of a novel method for Heuristics Informed Robot Online Path Planning (HIRO). Dividing robot environments into static and dynamic elements, we use the static part for initializing a deterministic roadmap, which provides a lower bound of the final path cost as info… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20279v1-abstract-full').style.display = 'inline'; document.getElementById('2410.20279v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20279v1-abstract-full" style="display: none;"> With the goal of efficiently computing collision-free robot motion trajectories in dynamically changing environments, we present results of a novel method for Heuristics Informed Robot Online Path Planning (HIRO). Dividing robot environments into static and dynamic elements, we use the static part for initializing a deterministic roadmap, which provides a lower bound of the final path cost as informed heuristics for fast path-finding. These heuristics guide a search tree to explore the roadmap during runtime. The search tree examines the edges using a fuzzy collision checking concerning the dynamic environment. Finally, the heuristics tree exploits knowledge fed back from the fuzzy collision checking module and updates the lower bound for the path cost. As we demonstrate in real-world experiments, the closed-loop formed by these three components significantly accelerates the planning procedure. An additional backtracking step ensures the feasibility of the resulting paths. Experiments in simulation and the real world show that HIRO can find collision-free paths considerably faster than baseline methods with and without prior knowledge of the environment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20279v1-abstract-full').style.display = 'none'; document.getElementById('2410.20279v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20272">arXiv:2410.20272</a> <span> [<a href="https://arxiv.org/pdf/2410.20272">pdf</a>, <a href="https://arxiv.org/format/2410.20272">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Planning with Learned Subgoals Selected by Temporal Information </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xi Huang</a>, <a href="/search/cs?searchtype=author&query=S%C3%B3ti%2C+G">Gergely S贸ti</a>, <a href="/search/cs?searchtype=author&query=Ledermann%2C+C">Christoph Ledermann</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a>, <a href="/search/cs?searchtype=author&query=Kr%C3%B6ger%2C+T">Torsten Kr枚ger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20272v1-abstract-short" style="display: inline;"> Path planning in a changing environment is a challenging task in robotics, as moving objects impose time-dependent constraints. Recent planning methods primarily focus on the spatial aspects, lacking the capability to directly incorporate time constraints. In this paper, we propose a method that leverages a generative model to decompose a complex planning problem into small manageable ones by incr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20272v1-abstract-full').style.display = 'inline'; document.getElementById('2410.20272v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20272v1-abstract-full" style="display: none;"> Path planning in a changing environment is a challenging task in robotics, as moving objects impose time-dependent constraints. Recent planning methods primarily focus on the spatial aspects, lacking the capability to directly incorporate time constraints. In this paper, we propose a method that leverages a generative model to decompose a complex planning problem into small manageable ones by incrementally generating subgoals given the current planning context. Then, we take into account the temporal information and use learned time estimators based on different statistic distributions to examine and select the generated subgoal candidates. Experiments show that planning from the current robot state to the selected subgoal can satisfy the given time-dependent constraints while being goal-oriented. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20272v1-abstract-full').style.display = 'none'; document.getElementById('2410.20272v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.09939">arXiv:2406.09939</a> <span> [<a href="https://arxiv.org/pdf/2406.09939">pdf</a>, <a href="https://arxiv.org/format/2406.09939">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> dGrasp: NeRF-Informed Implicit Grasp Policies with Supervised Optimization Slopes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=S%C3%B3ti%2C+G">Gergely S贸ti</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xi Huang</a>, <a href="/search/cs?searchtype=author&query=Wurll%2C+C">Christian Wurll</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.09939v2-abstract-short" style="display: inline;"> We present dGrasp, an implicit grasp policy with an enhanced optimization landscape. This landscape is defined by a NeRF-informed grasp value function. The neural network representing this function is trained on simulated grasp demonstrations. During training, we use an auxiliary loss to guide not only the weight updates of this network but also the update how the slope of the optimization landsca… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.09939v2-abstract-full').style.display = 'inline'; document.getElementById('2406.09939v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.09939v2-abstract-full" style="display: none;"> We present dGrasp, an implicit grasp policy with an enhanced optimization landscape. This landscape is defined by a NeRF-informed grasp value function. The neural network representing this function is trained on simulated grasp demonstrations. During training, we use an auxiliary loss to guide not only the weight updates of this network but also the update how the slope of the optimization landscape changes. This loss is computed on the demonstrated grasp trajectory and the gradients of the landscape. With second order optimization, we incorporate valuable information from the trajectory as well as facilitate the optimization process of the implicit policy. Experiments demonstrate that employing this auxiliary loss improves policies' performance in simulation as well as their zero-shot transfer to the real-world. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.09939v2-abstract-full').style.display = 'none'; document.getElementById('2406.09939v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.07935">arXiv:2401.07935</a> <span> [<a href="https://arxiv.org/pdf/2401.07935">pdf</a>, <a href="https://arxiv.org/format/2401.07935">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> 6-DoF Grasp Pose Evaluation and Optimization via Transfer Learning from NeRFs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=S%C3%B3ti%2C+G">Gergely S贸ti</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xi Huang</a>, <a href="/search/cs?searchtype=author&query=Wurll%2C+C">Christian Wurll</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.07935v1-abstract-short" style="display: inline;"> We address the problem of robotic grasping of known and unknown objects using implicit behavior cloning. We train a grasp evaluation model from a small number of demonstrations that outputs higher values for grasp candidates that are more likely to succeed in grasping. This evaluation model serves as an objective function, that we maximize to identify successful grasps. Key to our approach is the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.07935v1-abstract-full').style.display = 'inline'; document.getElementById('2401.07935v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.07935v1-abstract-full" style="display: none;"> We address the problem of robotic grasping of known and unknown objects using implicit behavior cloning. We train a grasp evaluation model from a small number of demonstrations that outputs higher values for grasp candidates that are more likely to succeed in grasping. This evaluation model serves as an objective function, that we maximize to identify successful grasps. Key to our approach is the utilization of learned implicit representations of visual and geometric features derived from a pre-trained NeRF. Though trained exclusively in a simulated environment with simplified objects and 4-DoF top-down grasps, our evaluation model and optimization procedure demonstrate generalization to 6-DoF grasps and novel objects both in simulation and in real-world settings, without the need for additional data. Supplementary material is available at: https://gergely-soti.github.io/grasp <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.07935v1-abstract-full').style.display = 'none'; document.getElementById('2401.07935v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.08040">arXiv:2309.08040</a> <span> [<a href="https://arxiv.org/pdf/2309.08040">pdf</a>, <a href="https://arxiv.org/format/2309.08040">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Gradient based Grasp Pose Optimization on a NeRF that Approximates Grasp Success </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=S%C3%B3ti%2C+G">Gergely S贸ti</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a>, <a href="/search/cs?searchtype=author&query=Wurll%2C+C">Christian Wurll</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.08040v1-abstract-short" style="display: inline;"> Current robotic grasping methods often rely on estimating the pose of the target object, explicitly predicting grasp poses, or implicitly estimating grasp success probabilities. In this work, we propose a novel approach that directly maps gripper poses to their corresponding grasp success values, without considering objectness. Specifically, we leverage a Neural Radiance Field (NeRF) architecture… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.08040v1-abstract-full').style.display = 'inline'; document.getElementById('2309.08040v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.08040v1-abstract-full" style="display: none;"> Current robotic grasping methods often rely on estimating the pose of the target object, explicitly predicting grasp poses, or implicitly estimating grasp success probabilities. In this work, we propose a novel approach that directly maps gripper poses to their corresponding grasp success values, without considering objectness. Specifically, we leverage a Neural Radiance Field (NeRF) architecture to learn a scene representation and use it to train a grasp success estimator that maps each pose in the robot's task space to a grasp success value. We employ this learned estimator to tune its inputs, i.e., grasp poses, by gradient-based optimization to obtain successful grasp poses. Contrary to other NeRF-based methods which enhance existing grasp pose estimation approaches by relying on NeRF's rendering capabilities or directly estimate grasp poses in a discretized space using NeRF's scene representation capabilities, our approach uniquely sidesteps both the need for rendering and the limitation of discretization. We demonstrate the effectiveness of our approach on four simulated 3DoF (Degree of Freedom) robotic grasping tasks and show that it can generalize to novel objects. Our best model achieves an average translation error of 3mm from valid grasp poses. This work opens the door for future research to apply our approach to higher DoF grasps and real-world scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.08040v1-abstract-full').style.display = 'none'; document.getElementById('2309.08040v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.08359">arXiv:2307.08359</a> <span> [<a href="https://arxiv.org/pdf/2307.08359">pdf</a>, <a href="https://arxiv.org/format/2307.08359">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-031-44981-9_21">10.1007/978-3-031-44981-9_21 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Human Emergency Detection during Autonomous Hospital Transports </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zachariae%2C+A">Andreas Zachariae</a>, <a href="/search/cs?searchtype=author&query=Widera%2C+J">Julia Widera</a>, <a href="/search/cs?searchtype=author&query=Plahl%2C+F">Frederik Plahl</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a>, <a href="/search/cs?searchtype=author&query=Wurll%2C+C">Christian Wurll</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.08359v1-abstract-short" style="display: inline;"> Human transports in hospitals are labor-intensive and primarily performed in beds to save time. This transfer method does not promote the mobility or autonomy of the patient. To relieve the caregivers from this time-consuming task, a mobile robot is developed to autonomously transport humans around the hospital. It provides different transfer modes including walking and sitting in a wheelchair. Th… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.08359v1-abstract-full').style.display = 'inline'; document.getElementById('2307.08359v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.08359v1-abstract-full" style="display: none;"> Human transports in hospitals are labor-intensive and primarily performed in beds to save time. This transfer method does not promote the mobility or autonomy of the patient. To relieve the caregivers from this time-consuming task, a mobile robot is developed to autonomously transport humans around the hospital. It provides different transfer modes including walking and sitting in a wheelchair. The problem that this paper focuses on is to detect emergencies and ensure the well-being of the patient during the transport. For this purpose, the patient is tracked and monitored with a camera system. OpenPose is used for Human Pose Estimation and a trained classifier for emergency detection. We collected and published a dataset of 18,000 images in lab and hospital environments. It differs from related work because we have a moving robot with different transfer modes in a highly dynamic environment with multiple people in the scene using only RGB-D data. To improve the critical recall metric, we apply threshold moving and a time delay. We compare different models with an AutoML approach. This paper shows that emergencies while walking are best detected by a SVM with a recall of 95.8% on single frames. In the case of sitting transport, the best model achieves a recall of 62.2%. The contribution is to establish a baseline on this new dataset and to provide a proof of concept for the human emergency detection in this use case. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.08359v1-abstract-full').style.display = 'none'; document.getElementById('2307.08359v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint of the corresponding IAS18-2023 conference publication (Proceedings of the 18th International Conference IAS-18)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.16017">arXiv:2303.16017</a> <span> [<a href="https://arxiv.org/pdf/2303.16017">pdf</a>, <a href="https://arxiv.org/format/2303.16017">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Inside-out Infrared Marker Tracking via Head Mounted Displays for Smart Robot Programming </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Vasilache%2C+A">Alexandru-George Vasilache</a>, <a href="/search/cs?searchtype=author&query=Mende%2C+M">Michael Mende</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.16017v1-abstract-short" style="display: inline;"> Intuitive robot programming through use of tracked smart input devices relies on fixed, external tracking systems, most often employing infra-red markers. Such an approach is frequently combined with projector-based augmented reality for better visualisation and interface. The combined system, although providing an intuitive programming platform with short cycle times even for inexperienced users,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.16017v1-abstract-full').style.display = 'inline'; document.getElementById('2303.16017v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.16017v1-abstract-full" style="display: none;"> Intuitive robot programming through use of tracked smart input devices relies on fixed, external tracking systems, most often employing infra-red markers. Such an approach is frequently combined with projector-based augmented reality for better visualisation and interface. The combined system, although providing an intuitive programming platform with short cycle times even for inexperienced users, is immobile, expensive and requires extensive calibration. When faced with a changing environment and large number of robots it becomes sorely impractical. Here we present our work on infra-red marker tracking using the Microsoft HoloLens head-mounted display. The HoloLens can map the environment, register the robot on-line, and track smart devices equipped with infra-red markers in the robot coordinate system. We envision our work to provide the basis to transfer many of the paradigms developed over the years for systems requiring a projector and a tracked input device into a highly-portable system that does not require any calibration or special set-up. We test the quality of the marker-tracking in an industrial robot cell and compare our tracking with a ground truth obtained via an ART-3 tracking system. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.16017v1-abstract-full').style.display = 'none'; document.getElementById('2303.16017v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As accepted to the 5th International Workshop on Virtual, Augmented, and Mixed Reality for HRI</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.09000">arXiv:2302.09000</a> <span> [<a href="https://arxiv.org/pdf/2302.09000">pdf</a>, <a href="https://arxiv.org/format/2302.09000">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Train What You Know -- Precise Pick-and-Place with Transporter Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=S%C3%B3ti%2C+G">Gergely S贸ti</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xi Huang</a>, <a href="/search/cs?searchtype=author&query=Wurll%2C+C">Christian Wurll</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.09000v1-abstract-short" style="display: inline;"> Precise pick-and-place is essential in robotic applications. To this end, we define a novel exact training method and an iterative inference method that improve pick-and-place precision with Transporter Networks. We conduct a large scale experiment on 8 simulated tasks. A systematic analysis shows, that the proposed modifications have a significant positive effect on model performance. Considering… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.09000v1-abstract-full').style.display = 'inline'; document.getElementById('2302.09000v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.09000v1-abstract-full" style="display: none;"> Precise pick-and-place is essential in robotic applications. To this end, we define a novel exact training method and an iterative inference method that improve pick-and-place precision with Transporter Networks. We conduct a large scale experiment on 8 simulated tasks. A systematic analysis shows, that the proposed modifications have a significant positive effect on model performance. Considering picking and placing independently, our methods achieve up to 60% lower rotation and translation errors than baselines. For the whole pick-and-place process we observe 50% lower rotation errors for most tasks with slight improvements in terms of translation errors. Furthermore, we propose architectural changes that retain model performance and reduce computational costs and time. We validate our methods with an interactive teaching procedure on real hardware. Supplementary material will be made available at: https://gergely-soti.github.io/p <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.09000v1-abstract-full').style.display = 'none'; document.getElementById('2302.09000v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.03538">arXiv:2204.03538</a> <span> [<a href="https://arxiv.org/pdf/2204.03538">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Updating Industrial Robots for Emerging Technologies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.03538v2-abstract-short" style="display: inline;"> Industrial arms need to evolve beyond their standard shape to embrace new and emerging technologies. In this paper, we shall first perform an analysis of four popular but different modern industrial robot arms. By seeing the common trends we will try to extrapolate and expand these trends for the future. Here, particular focus will be on interaction based on augmented reality (AR) through head-mou… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.03538v2-abstract-full').style.display = 'inline'; document.getElementById('2204.03538v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.03538v2-abstract-full" style="display: none;"> Industrial arms need to evolve beyond their standard shape to embrace new and emerging technologies. In this paper, we shall first perform an analysis of four popular but different modern industrial robot arms. By seeing the common trends we will try to extrapolate and expand these trends for the future. Here, particular focus will be on interaction based on augmented reality (AR) through head-mounted displays (HMD), but also through smartphones. Long-term human-robot interaction and personalization of said interaction will also be considered. The use of AR in human-robot interaction has proven to enhance communication and information exchange. A basic addition to industrial arm design would be the integration of QR markers on the robot, both for accessing information and adding tracking capabilities to more easily display AR overlays. In a recent example of information access, Mercedes Benz added QR markers on their cars to help rescue workers estimate the best places to cut and evacuate people after car crashes. One has also to deal with safety in an environment that will be more and more about collaboration. The QR markers can therefore be combined with RF-based ranging modules, developed in the EU-project SafeLog, that can be used both for safety as well as for tracking of human positions while in close proximity interactions with the industrial arms. The industrial arms of the future should also be intuitive to program and interact with. This would be achieved through AR and head mounted displays as well as the already mentioned RF-based person tracking. Finally, a more personalized interaction between the robots and humans can be achieved through life-long learning AI and disembodied, personalized agents. We propose a design that not only exists in the physical world, but also partly in the digital world of mixed reality. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.03538v2-abstract-full').style.display = 'none'; document.getElementById('2204.03538v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As accepted to the 2nd International Workshop on Designerly HRI; HRI 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.11512">arXiv:2202.11512</a> <span> [<a href="https://arxiv.org/pdf/2202.11512">pdf</a>, <a href="https://arxiv.org/format/2202.11512">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Using Deep Reinforcement Learning with Automatic Curriculum Learning for Mapless Navigation in Intralogistics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Xue%2C+H">Honghu Xue</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Benedikt Hein</a>, <a href="/search/cs?searchtype=author&query=Bakr%2C+M">Mohamed Bakr</a>, <a href="/search/cs?searchtype=author&query=Schildbach%2C+G">Georg Schildbach</a>, <a href="/search/cs?searchtype=author&query=Abel%2C+B">Bengt Abel</a>, <a href="/search/cs?searchtype=author&query=Rueckert%2C+E">Elmar Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.11512v2-abstract-short" style="display: inline;"> We propose a deep reinforcement learning approach for solving a mapless navigation problem in warehouse scenarios. In our approach, an automation guided vehicle is equipped with LiDAR and frontal RGB sensors and learns to perform a targeted navigation task. The challenges reside in the sparseness of positive samples for learning, multi-modal sensor perception with partial observability, the demand… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.11512v2-abstract-full').style.display = 'inline'; document.getElementById('2202.11512v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.11512v2-abstract-full" style="display: none;"> We propose a deep reinforcement learning approach for solving a mapless navigation problem in warehouse scenarios. In our approach, an automation guided vehicle is equipped with LiDAR and frontal RGB sensors and learns to perform a targeted navigation task. The challenges reside in the sparseness of positive samples for learning, multi-modal sensor perception with partial observability, the demand for accurate steering maneuvers together with long training cycles. To address these points, we propose NavACL-Q as a method for automatic curriculum learning in combination with a distributed version of the soft actor-critic algorithm. The performance of the learning algorithm is evaluated exhaustively in an unseen warehouse environment to validate both robustness and generalizability of the learned policy. Results in NVIDIA Isaac Sim demonstrates that our trained agent significantly outperforms a map-based navigation pipeline provided by NVIDIA Isaac Sim with an increased agent-goal distance of 3m and wider initial relative agent-goal rotations of 45 degree. The ablation studies also suggests that NavACL-Q greatly facilitates the learning process with a performance gain of roughly 40% compared to training with random starts and that the utilization of a pre-trained feature extractor manifestly boosts the performance by approximately 60%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.11512v2-abstract-full').style.display = 'none'; document.getElementById('2202.11512v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.07206">arXiv:2108.07206</a> <span> [<a href="https://arxiv.org/pdf/2108.07206">pdf</a>, <a href="https://arxiv.org/format/2108.07206">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TRO.2021.3111786">10.1109/TRO.2021.3111786 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Proximity Perception in Human-Centered Robotics: A Survey on Sensing Systems and Applications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Navarro%2C+S+E">Stefan Escaida Navarro</a>, <a href="/search/cs?searchtype=author&query=M%C3%BChlbacher-Karrer%2C+S">Stephan M眉hlbacher-Karrer</a>, <a href="/search/cs?searchtype=author&query=Alagi%2C+H">Hosam Alagi</a>, <a href="/search/cs?searchtype=author&query=Zangl%2C+H">Hubert Zangl</a>, <a href="/search/cs?searchtype=author&query=Koyama%2C+K">Keisuke Koyama</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a>, <a href="/search/cs?searchtype=author&query=Duriez%2C+C">Christian Duriez</a>, <a href="/search/cs?searchtype=author&query=Smith%2C+J+R">Joshua R. Smith</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.07206v2-abstract-short" style="display: inline;"> Proximity perception is a technology that has the potential to play an essential role in the future of robotics. It can fulfill the promise of safe, robust, and autonomous systems in industry and everyday life, alongside humans, as well as in remote locations in space and underwater. In this survey paper, we cover the developments of this field from the early days up to the present, with a focus o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.07206v2-abstract-full').style.display = 'inline'; document.getElementById('2108.07206v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.07206v2-abstract-full" style="display: none;"> Proximity perception is a technology that has the potential to play an essential role in the future of robotics. It can fulfill the promise of safe, robust, and autonomous systems in industry and everyday life, alongside humans, as well as in remote locations in space and underwater. In this survey paper, we cover the developments of this field from the early days up to the present, with a focus on human-centered robotics. Here, proximity sensors are typically deployed in two scenarios: first, on the exterior of manipulator arms to support safety and interaction functionality, and second, on the inside of grippers or hands to support grasping and exploration. Starting from this observation, we propose a categorization for the approaches found in the literature. To provide a basis for understanding these approaches, we devote effort to present the technologies and different measuring principles that were developed over the years, also providing a summary in form of a table. Then, we show the diversity of applications that have been presented in the literature. Finally, we give an overview of the most important trends that will shape the future of this domain. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.07206v2-abstract-full').style.display = 'none'; document.getElementById('2108.07206v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Transactions on Robotics 2021 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.11162">arXiv:2102.11162</a> <span> [<a href="https://arxiv.org/pdf/2102.11162">pdf</a>, <a href="https://arxiv.org/format/2102.11162">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> HAIR: Head-mounted AR Intention Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+B">Bowen Zhou</a>, <a href="/search/cs?searchtype=author&query=Ma%2C+K">Ke Ma</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.11162v1-abstract-short" style="display: inline;"> Human teams exhibit both implicit and explicit intention sharing. To further development of human-robot collaboration, intention recognition is crucial on both sides. Present approaches rely on a vast sensor suite on and around the robot to achieve intention recognition. This relegates intuitive human-robot collaboration purely to such bulky systems, which are inadequate for large-scale, real-worl… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.11162v1-abstract-full').style.display = 'inline'; document.getElementById('2102.11162v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.11162v1-abstract-full" style="display: none;"> Human teams exhibit both implicit and explicit intention sharing. To further development of human-robot collaboration, intention recognition is crucial on both sides. Present approaches rely on a vast sensor suite on and around the robot to achieve intention recognition. This relegates intuitive human-robot collaboration purely to such bulky systems, which are inadequate for large-scale, real-world scenarios due to their complexity and cost. In this paper we propose an intention recognition system that is based purely on a portable head-mounted display. In addition robot intention visualisation is also supported. We present experiments to show the quality of our human goal estimation component and some basic interactions with an industrial robot. HAIR should raise the quality of interaction between robots and humans, instead of such interactions raising the hair on the necks of the human coworkers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.11162v1-abstract-full').style.display = 'none'; document.getElementById('2102.11162v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As accepted to the 4th Virtual, Augmented and Mixed reality Human-robot interaction workshop (VAM-HRI), HRI 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.05854">arXiv:2012.05854</a> <span> [<a href="https://arxiv.org/pdf/2012.05854">pdf</a>, <a href="https://arxiv.org/ps/2012.05854">ps</a>, <a href="https://arxiv.org/format/2012.05854">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Adapting the Human: Leveraging Wearable Technology in HRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.05854v1-abstract-short" style="display: inline;"> Adhering to current HRI paradigms, all of the sensors, visualisation and legibility of actions and motions are borne by the robot or its working cell. This necessarily makes robots more complex or confines them into specialised, structured environments. We propose leveraging the state of the art of wearable technologies, such as augmented reality head mounted displays, smart watches, sensor tags a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.05854v1-abstract-full').style.display = 'inline'; document.getElementById('2012.05854v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.05854v1-abstract-full" style="display: none;"> Adhering to current HRI paradigms, all of the sensors, visualisation and legibility of actions and motions are borne by the robot or its working cell. This necessarily makes robots more complex or confines them into specialised, structured environments. We propose leveraging the state of the art of wearable technologies, such as augmented reality head mounted displays, smart watches, sensor tags and radio-frequency ranging, to "adapt" the human and reduce the requirements and complexity of robots. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.05854v1-abstract-full').style.display = 'none'; document.getElementById('2012.05854v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As submitted to the Robot Metaphors Workshop, ICSR 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.12651">arXiv:2005.12651</a> <span> [<a href="https://arxiv.org/pdf/2005.12651">pdf</a>, <a href="https://arxiv.org/format/2005.12651">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> What the HoloLens Maps Is Your Workspace: Fast Mapping and Set-up of Robot Cells via Head Mounted Displays and Augmented Reality </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Krebs%2C+F">Franziska Krebs</a>, <a href="/search/cs?searchtype=author&query=B%C3%B6sing%2C+F">Fabian B枚sing</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.12651v1-abstract-short" style="display: inline;"> Classical methods of modelling and mapping robot work cells are time consuming, expensive and involve expert knowledge. We present a novel approach to mapping and cell setup using modern Head Mounted Displays (HMDs) that possess self-localisation and mapping capabilities. We leveraged these capabilities to create a point cloud of the environment and build an OctoMap - a voxel occupancy grid repres… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.12651v1-abstract-full').style.display = 'inline'; document.getElementById('2005.12651v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.12651v1-abstract-full" style="display: none;"> Classical methods of modelling and mapping robot work cells are time consuming, expensive and involve expert knowledge. We present a novel approach to mapping and cell setup using modern Head Mounted Displays (HMDs) that possess self-localisation and mapping capabilities. We leveraged these capabilities to create a point cloud of the environment and build an OctoMap - a voxel occupancy grid representation of the robot's workspace for path planning. Through the use of Augmented Reality (AR) interactions, the user can edit the created Octomap and add security zones. We perform comprehensive tests of the HoloLens' depth sensing capabilities and the quality of the resultant point cloud. A high-end laser scanner is used to provide the ground truth for the evaluation of the point cloud quality. The amount of false-positive and false-negative voxels in the OctoMap are also tested. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.12651v1-abstract-full').style.display = 'none'; document.getElementById('2005.12651v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As submited to IROS 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1910.04494">arXiv:1910.04494</a> <span> [<a href="https://arxiv.org/pdf/1910.04494">pdf</a>, <a href="https://arxiv.org/format/1910.04494">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Concepts for End-to-end Augmented Reality based Human-Robot Interaction Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1910.04494v1-abstract-short" style="display: inline;"> The field of Augmented Reality (AR) based Human Robot Interaction (HRI) has progressed significantly since its inception more than two decades ago. With more advanced devices, particularly head-mounted displays (HMD), freely available programming environments and better connectivity, the possible application space expanded significantly. Here we present concepts and systems currently being develop… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.04494v1-abstract-full').style.display = 'inline'; document.getElementById('1910.04494v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1910.04494v1-abstract-full" style="display: none;"> The field of Augmented Reality (AR) based Human Robot Interaction (HRI) has progressed significantly since its inception more than two decades ago. With more advanced devices, particularly head-mounted displays (HMD), freely available programming environments and better connectivity, the possible application space expanded significantly. Here we present concepts and systems currently being developed at our lab to enable a truly end-to-end application of AR in HRI, from setting up the working environment of the robot, through programming and finally interaction with the programmed robot. Relevant papers by other authors will also be overviewed. We demonstrate the use of such technologies with systems not inherently designed to be collaborative, namely industrial manipulators. By trying to make such industrial systems easily-installable, collaborative and interactive, the vision of universal robot co-workers can be pushed one step closer to reality. The main goal of the paper is to provide a short overview of the capabilities of HMD-based HRI to researchers unfamiliar with the concepts. For researchers already using such techniques, the hope is to perhaps introduce some new ideas and to broaden the field of research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.04494v1-abstract-full').style.display = 'none'; document.getElementById('1910.04494v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As accepted to the "Factory of the Future" workshop, IROS 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1908.04692">arXiv:1908.04692</a> <span> [<a href="https://arxiv.org/pdf/1908.04692">pdf</a>, <a href="https://arxiv.org/format/1908.04692">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> General Hand Guidance Framework using Microsoft HoloLens </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=St%C3%B6hr%2C+E">Erik St枚hr</a>, <a href="/search/cs?searchtype=author&query=Riesterer%2C+K+S">Katharina S. Riesterer</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a>, <a href="/search/cs?searchtype=author&query=Kr%C3%B6ger%2C+T">Torsten Kr枚ger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1908.04692v1-abstract-short" style="display: inline;"> Hand guidance emerged from the safety requirements for collaborative robots, namely possessing joint-torque sensors. Since then it has proven to be a powerful tool for easy trajectory programming, allowing lay-users to reprogram robots intuitively. Going beyond, a robot can learn tasks by user demonstrations through kinesthetic teaching, enabling robots to generalise tasks and further reducing the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.04692v1-abstract-full').style.display = 'inline'; document.getElementById('1908.04692v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1908.04692v1-abstract-full" style="display: none;"> Hand guidance emerged from the safety requirements for collaborative robots, namely possessing joint-torque sensors. Since then it has proven to be a powerful tool for easy trajectory programming, allowing lay-users to reprogram robots intuitively. Going beyond, a robot can learn tasks by user demonstrations through kinesthetic teaching, enabling robots to generalise tasks and further reducing the need for reprogramming. However, hand guidance is still mostly relegated to collaborative robots. Here we propose a method that doesn't require any sensors on the robot or in the robot cell, by using a Microsoft HoloLens augmented reality head mounted display. We reference the robot using a registration algorithm to match the robot model to the spatial mesh. The in-built hand tracking and localisation capabilities are then used to calculate the position of the hands relative to the robot. By decomposing the hand movements into orthogonal rotations and propagating it down through the kinematic chain, we achieve a generalised hand guidance without the need to build a dynamic model of the robot itself. We tested our approach on a commonly used industrial manipulator, the KUKA KR-5. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.04692v1-abstract-full').style.display = 'none'; document.getElementById('1908.04692v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As accepted to IROS 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1906.04240">arXiv:1906.04240</a> <span> [<a href="https://arxiv.org/pdf/1906.04240">pdf</a>, <a href="https://arxiv.org/format/1906.04240">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ETFA.2019.8869456">10.1109/ETFA.2019.8869456 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Interpreting OWL Complex Classes in AutomationML based on Bidirectional Translation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hua%2C+Y">Yingbing Hua</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1906.04240v1-abstract-short" style="display: inline;"> The World Wide Web Consortium (W3C) has published several recommendations for building and storing ontologies, including the most recent OWL 2 Web Ontology Language (OWL). These initiatives have been followed by practical implementations that popularize OWL in various domains. For example, OWL has been used for conceptual modeling in industrial engineering, and its reasoning facilities are used to… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1906.04240v1-abstract-full').style.display = 'inline'; document.getElementById('1906.04240v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1906.04240v1-abstract-full" style="display: none;"> The World Wide Web Consortium (W3C) has published several recommendations for building and storing ontologies, including the most recent OWL 2 Web Ontology Language (OWL). These initiatives have been followed by practical implementations that popularize OWL in various domains. For example, OWL has been used for conceptual modeling in industrial engineering, and its reasoning facilities are used to provide a wealth of services, e.g. model diagnosis, automated code generation, and semantic integration. More specifically, recent studies have shown that OWL is well suited for harmonizing information of engineering tools stored as AutomationML (AML) files. However, OWL and its tools can be cumbersome for direct use by engineers such that an ontology expert is often required in practice. Although much attention has been paid in the literature to overcome this issue by transforming OWL ontologies from/to AML models automatically, dealing with OWL complex classes remains an open research question. In this paper, we introduce the AML concept models for representing OWL complex classes in AutomationML, and present algorithms for the bidirectional translation between OWL complex classes and their corresponding AML concept models. We show that this approach provides an efficient and intuitive interface for nonexperts to visualize, modify, and create OWL complex classes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1906.04240v1-abstract-full').style.display = 'none'; document.getElementById('1906.04240v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 June, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As accepted to IEEE 24th International Conference on Emerging Technologies and Factory Automation (ETFA 2019)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.02480">arXiv:1904.02480</a> <span> [<a href="https://arxiv.org/pdf/1904.02480">pdf</a>, <a href="https://arxiv.org/format/1904.02480">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Referencing between a Head-Mounted Device and Robotic Manipulators </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Riesterer%2C+K+S">Katharina S. Riesterer</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a>, <a href="/search/cs?searchtype=author&query=Kr%C3%B6ger%2C+T">Torsten Kr枚ger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.02480v1-abstract-short" style="display: inline;"> Having a precise and robust transformation between the robot coordinate system and the AR-device coordinate system is paramount during human-robot interaction (HRI) based on augmented reality using Head mounted displays (HMD), both for intuitive information display and for the tracking of human motions. Most current solutions in this area rely either on the tracking of visual markers, e.g. QR code… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.02480v1-abstract-full').style.display = 'inline'; document.getElementById('1904.02480v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.02480v1-abstract-full" style="display: none;"> Having a precise and robust transformation between the robot coordinate system and the AR-device coordinate system is paramount during human-robot interaction (HRI) based on augmented reality using Head mounted displays (HMD), both for intuitive information display and for the tracking of human motions. Most current solutions in this area rely either on the tracking of visual markers, e.g. QR codes, or on manual referencing, both of which provide unsatisfying results. Meanwhile a plethora of object detection and referencing methods exist in the wider robotic and machine vision communities. The precision of the referencing is likewise almost never measured. Here we would like to address this issue by firstly presenting an overview of currently used referencing methods between robots and HMDs. This is followed by a brief overview of object detection and referencing methods used in the field of robotics. Based on these methods we suggest three classes of referencing algorithms we intend to pursue - semi-automatic, on-shot; automatic, one-shot; and automatic continuous. We describe the general workflows of these three classes as well as describing our proposed algorithms in each of these classes. Finally we present the first experimental results of a semi-automatic referencing algorithm, tested on an industrial KUKA KR-5 manipulator. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.02480v1-abstract-full').style.display = 'none'; document.getElementById('1904.02480v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As submitted to the 2nd VAM-HRI workshop, HRI 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.04933">arXiv:1901.04933</a> <span> [<a href="https://arxiv.org/pdf/1901.04933">pdf</a>, <a href="https://arxiv.org/ps/1901.04933">ps</a>, <a href="https://arxiv.org/format/1901.04933">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Sensorless Hand Guidance using Microsoft Hololens </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=St%C3%B6hr%2C+E">Erik St枚hr</a>, <a href="/search/cs?searchtype=author&query=Riesterer%2C+K+S">Katharina S. Riesterer</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a>, <a href="/search/cs?searchtype=author&query=Kr%C3%B6ger%2C+T">Torsten Kr枚ger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1901.04933v1-abstract-short" style="display: inline;"> Hand guidance of robots has proven to be a useful tool both for programming trajectories and in kinesthetic teaching. However hand guidance is usually relegated to robots possessing joint-torque sensors (JTS). Here we propose to extend hand guidance to robots lacking those sensors through the use of an Augmented Reality (AR) device, namely Microsoft's Hololens. Augmented reality devices have been… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.04933v1-abstract-full').style.display = 'inline'; document.getElementById('1901.04933v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1901.04933v1-abstract-full" style="display: none;"> Hand guidance of robots has proven to be a useful tool both for programming trajectories and in kinesthetic teaching. However hand guidance is usually relegated to robots possessing joint-torque sensors (JTS). Here we propose to extend hand guidance to robots lacking those sensors through the use of an Augmented Reality (AR) device, namely Microsoft's Hololens. Augmented reality devices have been envisioned as a helpful addition to ease both robot programming and increase situational awareness of humans working in close proximity to robots. We reference the robot by using a registration algorithm to match a robot model to the spatial mesh. The in-built hand tracking capabilities are then used to calculate the position of the hands relative to the robot. By decomposing the hand movements into orthogonal rotations we achieve a completely sensorless hand guidance without any need to build a dynamic model of the robot itself. We did the first tests our approach on a commonly used industrial manipulator, the KUKA KR-5. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.04933v1-abstract-full').style.display = 'none'; document.getElementById('1901.04933v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to HRI2019 - Late Breaking reports</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.08269">arXiv:1811.08269</a> <span> [<a href="https://arxiv.org/pdf/1811.08269">pdf</a>, <a href="https://arxiv.org/format/1811.08269">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.rcim.2018.11.004">10.1016/j.rcim.2018.11.004 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Human Intention Estimation based on Hidden Markov Model Motion Validation for Safe Flexible Robotized Warehouses </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Petkovi%C4%87%2C+T">Tomislav Petkovi膰</a>, <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Markovi%C4%87%2C+I">Ivan Markovi膰</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.08269v1-abstract-short" style="display: inline;"> With the substantial growth of logistics businesses the need for larger warehouses and their automation arises, thus using robots as assistants to human workers is becoming a priority. In order to operate efficiently and safely, robot assistants or the supervising system should recognize human intentions in real-time. Theory of mind (ToM) is an intuitive human conception of other humans' mental st… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08269v1-abstract-full').style.display = 'inline'; document.getElementById('1811.08269v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.08269v1-abstract-full" style="display: none;"> With the substantial growth of logistics businesses the need for larger warehouses and their automation arises, thus using robots as assistants to human workers is becoming a priority. In order to operate efficiently and safely, robot assistants or the supervising system should recognize human intentions in real-time. Theory of mind (ToM) is an intuitive human conception of other humans' mental state, i.e., beliefs and desires, and how they cause behavior. In this paper we propose a ToM based human intention estimation algorithm for flexible robotized warehouses. We observe human's, i.e., worker's motion and validate it with respect to the goal locations using generalized Voronoi diagram based path planning. These observations are then processed by the proposed hidden Markov model framework which estimates worker intentions in an online manner, capable of handling changing environments. To test the proposed intention estimation we ran experiments in a real-world laboratory warehouse with a worker wearing Microsoft Hololens augmented reality glasses. Furthermore, in order to demonstrate the scalability of the approach to larger warehouses, we propose to use virtual reality digital warehouse twins in order to realistically simulate worker behavior. We conducted intention estimation experiments in the larger warehouse digital twin with up to 24 running robots. We demonstrate that the proposed framework estimates warehouse worker intentions precisely and in the end we discuss the experimental results. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08269v1-abstract-full').style.display = 'none'; document.getElementById('1811.08269v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Robotics and Computer-Integrated Manufacturing 57 (2019): 182-196 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.00324">arXiv:1806.00324</a> <span> [<a href="https://arxiv.org/pdf/1806.00324">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Implementation of Augmented Reality in Autonomous Warehouses: Challenges and Opportunities </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puljiz%2C+D">David Puljiz</a>, <a href="/search/cs?searchtype=author&query=Gorbachev%2C+G">Gleb Gorbachev</a>, <a href="/search/cs?searchtype=author&query=Hein%2C+B">Bj枚rn Hein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.00324v1-abstract-short" style="display: inline;"> Autonomous warehouses with mobile, rack-carrying robots are starting to become commonplace, with systems such as Amazon's Kiva and Swisslog's CarryPick already implemented in functional warehouses. Such warehouses however still require human intervention for object picking and maintenance. In the European project SafeLog we are developing a safety-vest, used for safety-critical ranging and stoppin… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.00324v1-abstract-full').style.display = 'inline'; document.getElementById('1806.00324v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1806.00324v1-abstract-full" style="display: none;"> Autonomous warehouses with mobile, rack-carrying robots are starting to become commonplace, with systems such as Amazon's Kiva and Swisslog's CarryPick already implemented in functional warehouses. Such warehouses however still require human intervention for object picking and maintenance. In the European project SafeLog we are developing a safety-vest, used for safety-critical ranging and stopping of mobile robots, an improved planner that can handle large fleets of heterogeneous agents as well as an AR interaction system to navigate and support human workers in such automated environments. Here we present the AR interaction modalities, namely navigation, pick-by-AR and general system interactions that were developed at the moment of writing, as well as the overall system concept and planned future work. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.00324v1-abstract-full').style.display = 'none'; document.getElementById('1806.00324v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As submitted to the VAM-HRI workshop, HRI 2018</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>