CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 79 results for author: <span class="mathjax">Tanaka, Y</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Tanaka%2C+Y">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Tanaka, Y"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Tanaka%2C+Y&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Tanaka, Y"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Tanaka%2C+Y&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Tanaka%2C+Y&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Tanaka%2C+Y&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08371">arXiv:2411.08371</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08371">pdf</a>, <a href="https://arxiv.org/format/2411.08371">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/MLSP58920.2024.10734767">10.1109/MLSP58920.2024.10734767 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Multiscale Graph Construction Using Non-local Cluster Features </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kaneko%2C+R">Reina Kaneko</a>, <a href="/search/cs?searchtype=author&amp;query=Kojima%2C+H">Hayate Kojima</a>, <a href="/search/cs?searchtype=author&amp;query=Yanagiya%2C+K">Kenta Yanagiya</a>, <a href="/search/cs?searchtype=author&amp;query=Hara%2C+J">Junya Hara</a>, <a href="/search/cs?searchtype=author&amp;query=Higashi%2C+H">Hiroshi Higashi</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08371v1-abstract-short" style="display: inline;"> This paper presents a multiscale graph construction method using both graph and signal features. Multiscale graph is a hierarchical representation of the graph, where a node at each level indicates a cluster in a finer resolution. To obtain the hierarchical clusters, existing methods often use graph clustering; however, they may ignore signal variations. As a result, these methods could fail to de&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08371v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08371v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08371v1-abstract-full" style="display: none;"> This paper presents a multiscale graph construction method using both graph and signal features. Multiscale graph is a hierarchical representation of the graph, where a node at each level indicates a cluster in a finer resolution. To obtain the hierarchical clusters, existing methods often use graph clustering; however, they may ignore signal variations. As a result, these methods could fail to detect the clusters having similar features on nodes. In this paper, we consider graph and node-wise features simultaneously for multiscale clustering of a graph. With given clusters of the graph, the clusters are merged hierarchically in three steps: 1) Feature vectors in the clusters are extracted. 2) Similarities among cluster features are calculated using optimal transport. 3) A variable $k$-nearest neighbor graph (V$k$NNG) is constructed and graph spectral clustering is applied to the V$k$NNG to obtain clusters at a coarser scale. Additionally, the multiscale graph in this paper has \textit{non-local} characteristics: Nodes with similar features are merged even if they are spatially separated. In experiments on multiscale image and point cloud segmentation, we demonstrate the effectiveness of the proposed method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08371v1-abstract-full').style.display = 'none'; document.getElementById('2411.08371v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05118">arXiv:2411.05118</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05118">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> An emotional expression system with vibrotactile feedback during the robot&#39;s speech </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Konishi%2C+Y">Yuki Konishi</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yoshihiro Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05118v1-abstract-short" style="display: inline;"> This study aimed to develop a system that provides vibrotactile feedback corresponding to the emotional content of text when a communication robot speaks. We used OpenAI&#39;s &#34;GPT-4o Mini&#34; for emotion estimation, extracting valence and arousal values from the text. The amplitude and frequency of vibrotactile stimulation using sine waves were controlled on the basis of estimated emotional values. We a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05118v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05118v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05118v1-abstract-full" style="display: none;"> This study aimed to develop a system that provides vibrotactile feedback corresponding to the emotional content of text when a communication robot speaks. We used OpenAI&#39;s &#34;GPT-4o Mini&#34; for emotion estimation, extracting valence and arousal values from the text. The amplitude and frequency of vibrotactile stimulation using sine waves were controlled on the basis of estimated emotional values. We assembled a palm-sized tactile display to present these vibrotactile stimuli. In the experiment, participants listened to the robot&#39;s speech while holding the device and then evaluated their psychological state. The results suggested that the communication accompanied by the vibrotactile feedback could influence psychological states and intimacy levels. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05118v1-abstract-full').style.display = 'none'; document.getElementById('2411.05118v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Part of proceedings of 6th International Conference AsiaHaptics 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05099">arXiv:2411.05099</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05099">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Exploring Vibrotactile Intensity Perception with Multiple Waveform Parameters </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kuhara%2C+T">Takumi Kuhara</a>, <a href="/search/cs?searchtype=author&amp;query=Yukawa%2C+H">Hikari Yukawa</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yoshihiro Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05099v1-abstract-short" style="display: inline;"> It is known that by longing the duration of a vibrotactile stimuli or applying a damping or an increasing factor to the waveform the perceived intensity is affected in different ways. This paper presents a vibrotactile presentation system assembled with a software waveform generator that enables comparison in the perceived intensity for different waveforms made by multiple parameters. The adjustab&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05099v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05099v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05099v1-abstract-full" style="display: none;"> It is known that by longing the duration of a vibrotactile stimuli or applying a damping or an increasing factor to the waveform the perceived intensity is affected in different ways. This paper presents a vibrotactile presentation system assembled with a software waveform generator that enables comparison in the perceived intensity for different waveforms made by multiple parameters. The adjustable parameters are frequency, amplitude, and wave type for the basic part of the stimuli and in addition, it is possible to apply an exponential decay or increasing factor to the waveform by specificizing the duration. By using the presented system, an easy comparison of the influence to the perception of intensity by different parameters of the waveform is possible. We conducted a preliminary experiment on a variety of waveshapes with and without damping by using this system. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05099v1-abstract-full').style.display = 'none'; document.getElementById('2411.05099v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Part of proceedings of 6th International Conference AsiaHaptics 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.23193">arXiv:2410.23193</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.23193">pdf</a>, <a href="https://arxiv.org/format/2410.23193">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ISMAR62088.2024.00111">10.1109/ISMAR62088.2024.00111 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> ReaWristic: Remote Touch Sensation to Fingers from a Wristband via Visually Augmented Electro-Tactile Feedback </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yudai Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Weiss%2C+N">Neil Weiss</a>, <a href="/search/cs?searchtype=author&amp;query=Bolger-Cruz%2C+R+C">Robert Cole Bolger-Cruz</a>, <a href="/search/cs?searchtype=author&amp;query=Hartcher-O%27Brien%2C+J">Jess Hartcher-O&#39;Brien</a>, <a href="/search/cs?searchtype=author&amp;query=Flynn%2C+B">Brendan Flynn</a>, <a href="/search/cs?searchtype=author&amp;query=Boldu%2C+R">Roger Boldu</a>, <a href="/search/cs?searchtype=author&amp;query=Colonnese%2C+N">Nicholas Colonnese</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.23193v1-abstract-short" style="display: inline;"> We present a technique for providing remote tactile feedback to the thumb and index finger via a wristband device. This enables haptics for touch and pinch interactions in mixed reality (MR) while keeping the hand entirely free. We achieve this through a novel cross-modal stimulation, which we term visually augmented electro-tactile feedback. This consists of (1) electrically stimulating the nerve&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23193v1-abstract-full').style.display = 'inline'; document.getElementById('2410.23193v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.23193v1-abstract-full" style="display: none;"> We present a technique for providing remote tactile feedback to the thumb and index finger via a wristband device. This enables haptics for touch and pinch interactions in mixed reality (MR) while keeping the hand entirely free. We achieve this through a novel cross-modal stimulation, which we term visually augmented electro-tactile feedback. This consists of (1) electrically stimulating the nerves that innervate the targeted fingers using our wristband device and (2) concurrently, visually augmenting the targeted finger in MR to steer the perceived sensation to the desired location. In our psychophysics study, we found that our approach provides tactile perception akin to tapping and, even from the wrist, it is capable of delivering the sensation to the targeted fingers with about 50% of sensation occurring in the thumb and about 40% of sensation occurring in the index finger. These results on localizability are unprecedented compared to electro-tactile feedback alone or any prior work for creating sensations in the hand with devices worn on the wrist/arm. Moreover, unlike conventional electro-tactile techniques, our wristband dispenses with gel electrodes. Instead, it incorporates custom-made elastomer-based dry electrodes and a stimulation waveform designed for the electrodes, ensuring the practicality of the device beyond laboratory settings. Lastly, we evaluated the haptic realism of our approach in mixed reality and elicited qualitative feedback from users. Participants preferred our approach to a baseline vibrotactile wrist-worn device. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23193v1-abstract-full').style.display = 'none'; document.getElementById('2410.23193v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 14 figures, published at IEEE ISMAR 2024</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE International Symposium on Mixed and Augmented Reality (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.21274">arXiv:2410.21274</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.21274">pdf</a>, <a href="https://arxiv.org/format/2410.21274">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div> <p class="title is-5 mathjax"> High-level hybridization of heuristics and metaheuristics to solve symmetric TSP: a comparative study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Junior%2C+C+A+d+S">Carlos Alberto da Silva Junior</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+R+Y">Roberto Yuji Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=da+Silva%2C+L+C+F">Luiz Carlos Farias da Silva</a>, <a href="/search/cs?searchtype=author&amp;query=Passaro%2C+A">Angelo Passaro</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.21274v1-abstract-short" style="display: inline;"> The Travelling Salesman Problem - TSP is one of the most explored problems in the scientific literature to solve real problems regarding the economy, transportation, and logistics, to cite a few cases. Adapting TSP to solve different problems has originated several variants of the optimization problem with more complex objectives and different restrictions. Metaheuristics have been used to solve t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21274v1-abstract-full').style.display = 'inline'; document.getElementById('2410.21274v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.21274v1-abstract-full" style="display: none;"> The Travelling Salesman Problem - TSP is one of the most explored problems in the scientific literature to solve real problems regarding the economy, transportation, and logistics, to cite a few cases. Adapting TSP to solve different problems has originated several variants of the optimization problem with more complex objectives and different restrictions. Metaheuristics have been used to solve the problem in polynomial time. Several studies have tried hybridising metaheuristics with specialised heuristics to improve the quality of the solutions. However, we have found no study to evaluate whether the searching mechanism of a particular metaheuristic is more adequate for exploring hybridization. This paper focuses on the solution of the classical TSP using high-level hybridisations, experimenting with eight metaheuristics and heuristics derived from k-OPT, SISR, and segment intersection search, resulting in twenty-four combinations. Some combinations allow more than one set of searching parameters. Problems with 50 to 280 cities are solved. Parameter tuning of the metaheuristics is not carried out, exploiting the different searching patterns of the eight metaheuristics instead. The solutions&#39; quality is compared to those presented in the literature. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21274v1-abstract-full').style.display = 'none'; document.getElementById('2410.21274v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.17524">arXiv:2410.17524</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.17524">pdf</a>, <a href="https://arxiv.org/format/2410.17524">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Mechanisms and Computational Design of Multi-Modal End-Effector with Force Sensing using Gated Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+A">Alvin Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+R">Richard Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+A">Ankur Mehta</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.17524v2-abstract-short" style="display: inline;"> In limbed robotics, end-effectors must serve dual functions, such as both feet for locomotion and grippers for grasping, which presents design challenges. This paper introduces a multi-modal end-effector capable of transitioning between flat and line foot configurations while providing grasping capabilities. MAGPIE integrates 8-axis force sensing using proposed mechanisms with hall effect sensors,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.17524v2-abstract-full').style.display = 'inline'; document.getElementById('2410.17524v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.17524v2-abstract-full" style="display: none;"> In limbed robotics, end-effectors must serve dual functions, such as both feet for locomotion and grippers for grasping, which presents design challenges. This paper introduces a multi-modal end-effector capable of transitioning between flat and line foot configurations while providing grasping capabilities. MAGPIE integrates 8-axis force sensing using proposed mechanisms with hall effect sensors, enabling both contact and tactile force measurements. We present a computational design framework for our sensing mechanism that accounts for noise and interference, allowing for desired sensitivity and force ranges and generating ideal inverse models. The hardware implementation of MAGPIE is validated through experiments, demonstrating its capability as a foot and verifying the performance of the sensing mechanisms, ideal models, and gated network-based models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.17524v2-abstract-full').style.display = 'none'; document.getElementById('2410.17524v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.16591">arXiv:2410.16591</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.16591">pdf</a>, <a href="https://arxiv.org/format/2410.16591">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Cycloidal Quasi-Direct Drive Actuator Designs with Learning-based Torque Estimation for Legged Robotics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+A">Alvin Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Rafeedi%2C+F">Fadi Rafeedi</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.16591v1-abstract-short" style="display: inline;"> This paper presents a novel approach through the design and implementation of Cycloidal Quasi-Direct Drive actuators for legged robotics. The cycloidal gear mechanism, with its inherent high torque density and mechanical robustness, offers significant advantages over conventional designs. By integrating cycloidal gears into the Quasi-Direct Drive framework, we aim to enhance the performance of leg&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.16591v1-abstract-full').style.display = 'inline'; document.getElementById('2410.16591v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.16591v1-abstract-full" style="display: none;"> This paper presents a novel approach through the design and implementation of Cycloidal Quasi-Direct Drive actuators for legged robotics. The cycloidal gear mechanism, with its inherent high torque density and mechanical robustness, offers significant advantages over conventional designs. By integrating cycloidal gears into the Quasi-Direct Drive framework, we aim to enhance the performance of legged robots, particularly in tasks demanding high torque and dynamic loads, while still keeping them lightweight. Additionally, we develop a torque estimation framework for the actuator using an Actuator Network, which effectively reduces the sim-to-real gap introduced by the cycloidal drive&#39;s complex dynamics. This integration is crucial for capturing the complex dynamics of a cycloidal drive, which contributes to improved learning efficiency, agility, and adaptability for reinforcement learning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.16591v1-abstract-full').style.display = 'none'; document.getElementById('2410.16591v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.06192">arXiv:2410.06192</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.06192">pdf</a>, <a href="https://arxiv.org/format/2410.06192">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Hibikino-Musashi@Home 2024 Team Description Paper </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Isomoto%2C+K">Kosei Isomoto</a>, <a href="/search/cs?searchtype=author&amp;query=Mizutani%2C+A">Akinobu Mizutani</a>, <a href="/search/cs?searchtype=author&amp;query=Matsuzaki%2C+F">Fumiya Matsuzaki</a>, <a href="/search/cs?searchtype=author&amp;query=Sato%2C+H">Hikaru Sato</a>, <a href="/search/cs?searchtype=author&amp;query=Matsumoto%2C+I">Ikuya Matsumoto</a>, <a href="/search/cs?searchtype=author&amp;query=Yamao%2C+K">Kosei Yamao</a>, <a href="/search/cs?searchtype=author&amp;query=Kawabata%2C+T">Takuya Kawabata</a>, <a href="/search/cs?searchtype=author&amp;query=Shiba%2C+T">Tomoya Shiba</a>, <a href="/search/cs?searchtype=author&amp;query=Yano%2C+Y">Yuga Yano</a>, <a href="/search/cs?searchtype=author&amp;query=Yokota%2C+A">Atsuki Yokota</a>, <a href="/search/cs?searchtype=author&amp;query=Kanaoka%2C+D">Daiju Kanaoka</a>, <a href="/search/cs?searchtype=author&amp;query=Yamaguchi%2C+H">Hiromasa Yamaguchi</a>, <a href="/search/cs?searchtype=author&amp;query=Murai%2C+K">Kazuya Murai</a>, <a href="/search/cs?searchtype=author&amp;query=Minje%2C+K">Kim Minje</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+L">Lu Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Suzuka%2C+M">Mayo Suzuka</a>, <a href="/search/cs?searchtype=author&amp;query=Anraku%2C+M">Moeno Anraku</a>, <a href="/search/cs?searchtype=author&amp;query=Yamaguchi%2C+N">Naoki Yamaguchi</a>, <a href="/search/cs?searchtype=author&amp;query=Fujimatsu%2C+S">Satsuki Fujimatsu</a>, <a href="/search/cs?searchtype=author&amp;query=Tokuno%2C+S">Shoshi Tokuno</a>, <a href="/search/cs?searchtype=author&amp;query=Mizo%2C+T">Tadataka Mizo</a>, <a href="/search/cs?searchtype=author&amp;query=Fujino%2C+T">Tomoaki Fujino</a>, <a href="/search/cs?searchtype=author&amp;query=Nakadera%2C+Y">Yuuki Nakadera</a>, <a href="/search/cs?searchtype=author&amp;query=Shishido%2C+Y">Yuka Shishido</a>, <a href="/search/cs?searchtype=author&amp;query=Nakaoka%2C+Y">Yusuke Nakaoka</a> , et al. (3 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.06192v2-abstract-short" style="display: inline;"> This paper provides an overview of the techniques employed by Hibikino-Musashi@Home, which intends to participate in the domestic standard platform league. The team has developed a dataset generator for training a robot vision system and an open-source development environment running on a Human Support Robot simulator. The large language model powered task planner selects appropriate primitive s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06192v2-abstract-full').style.display = 'inline'; document.getElementById('2410.06192v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.06192v2-abstract-full" style="display: none;"> This paper provides an overview of the techniques employed by Hibikino-Musashi@Home, which intends to participate in the domestic standard platform league. The team has developed a dataset generator for training a robot vision system and an open-source development environment running on a Human Support Robot simulator. The large language model powered task planner selects appropriate primitive skills to perform the task requested by users. The team aims to design a home service robot that can assist humans in their homes and continuously attends competitions to evaluate and improve the developed system. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06192v2-abstract-full').style.display = 'none'; document.getElementById('2410.06192v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.07794">arXiv:2409.07794</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.07794">pdf</a>, <a href="https://arxiv.org/format/2409.07794">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Efficient Learning of Balanced Signed Graphs via Iterative Linear Programming </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yokota%2C+H">Haruki Yokota</a>, <a href="/search/cs?searchtype=author&amp;query=Higashi%2C+H">Hiroshi Higashi</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Cheung%2C+G">Gene Cheung</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.07794v1-abstract-short" style="display: inline;"> Signed graphs are equipped with both positive and negative edge weights, encoding pairwise correlations as well as anti-correlations in data. A balanced signed graph has no cycles of odd number of negative edges. Laplacian of a balanced signed graph has eigenvectors that map simply to ones in a similarity-transformed positive graph Laplacian, thus enabling reuse of well-studied spectral filters de&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.07794v1-abstract-full').style.display = 'inline'; document.getElementById('2409.07794v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.07794v1-abstract-full" style="display: none;"> Signed graphs are equipped with both positive and negative edge weights, encoding pairwise correlations as well as anti-correlations in data. A balanced signed graph has no cycles of odd number of negative edges. Laplacian of a balanced signed graph has eigenvectors that map simply to ones in a similarity-transformed positive graph Laplacian, thus enabling reuse of well-studied spectral filters designed for positive graphs. We propose a fast method to learn a balanced signed graph Laplacian directly from data. Specifically, for each node $i$, to determine its polarity $尾_i \in \{-1,1\}$ and edge weights $\{w_{i,j}\}_{j=1}^N$, we extend a sparse inverse covariance formulation based on linear programming (LP) called CLIME, by adding linear constraints to enforce ``consistent&#34; signs of edge weights $\{w_{i,j}\}_{j=1}^N$ with the polarities of connected nodes -- i.e., positive/negative edges connect nodes of same/opposing polarities. For each LP, we adapt projections on convex set (POCS) to determine a suitable CLIME parameter $蟻&gt; 0$ that guarantees LP feasibility. We solve the resulting LP via an off-the-shelf LP solver in $\mathcal{O}(N^{2.055})$. Experiments on synthetic and real-world datasets show that our balanced graph learning method outperforms competing methods and enables the use of spectral filters and graph convolutional networks (GCNs) designed for positive graphs on signed graphs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.07794v1-abstract-full').style.display = 'none'; document.getElementById('2409.07794v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 1 figure. Submitted to ICASSP 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.06676">arXiv:2409.06676</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.06676">pdf</a>, <a href="https://arxiv.org/format/2409.06676">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Constructing an Interpretable Deep Denoiser by Unrolling Graph Laplacian Regularizer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hosseini%2C+S+A">Seyed Alireza Hosseini</a>, <a href="/search/cs?searchtype=author&amp;query=Do%2C+T+T">Tam Thuc Do</a>, <a href="/search/cs?searchtype=author&amp;query=Cheung%2C+G">Gene Cheung</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.06676v1-abstract-short" style="display: inline;"> An image denoiser can be used for a wide range of restoration problems via the Plug-and-Play (PnP) architecture. In this paper, we propose a general framework to build an interpretable graph-based deep denoiser (GDD) by unrolling a solution to a maximum a posteriori (MAP) problem equipped with a graph Laplacian regularizer (GLR) as signal prior. Leveraging a recent theorem showing that any (pseudo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06676v1-abstract-full').style.display = 'inline'; document.getElementById('2409.06676v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.06676v1-abstract-full" style="display: none;"> An image denoiser can be used for a wide range of restoration problems via the Plug-and-Play (PnP) architecture. In this paper, we propose a general framework to build an interpretable graph-based deep denoiser (GDD) by unrolling a solution to a maximum a posteriori (MAP) problem equipped with a graph Laplacian regularizer (GLR) as signal prior. Leveraging a recent theorem showing that any (pseudo-)linear denoiser $\boldsymbol 唯$, under mild conditions, can be mapped to a solution of a MAP denoising problem regularized using GLR, we first initialize a graph Laplacian matrix $\mathbf L$ via truncated Taylor Series Expansion (TSE) of $\boldsymbol 唯^{-1}$. Then, we compute the MAP linear system solution by unrolling iterations of the conjugate gradient (CG) algorithm into a sequence of neural layers as a feed-forward network -- one that is amenable to parameter tuning. The resulting GDD network is &#34;graph-interpretable&#34;, low in parameter count, and easy to initialize thanks to $\mathbf L$ derived from a known well-performing denoiser $\boldsymbol 唯$. Experimental results show that GDD achieves competitive image denoising performance compared to competitors, but employing far fewer parameters, and is more robust to covariate shift. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06676v1-abstract-full').style.display = 'none'; document.getElementById('2409.06676v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.12279">arXiv:2408.12279</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.12279">pdf</a>, <a href="https://arxiv.org/format/2408.12279">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Developing vocal system impaired patient-aimed voice quality assessment approach using ASR representation-included multiple features </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dang%2C+S">Shaoxiang Dang</a>, <a href="/search/cs?searchtype=author&amp;query=Matsumoto%2C+T">Tetsuya Matsumoto</a>, <a href="/search/cs?searchtype=author&amp;query=Takeuchi%2C+Y">Yoshinori Takeuchi</a>, <a href="/search/cs?searchtype=author&amp;query=Tsuboi%2C+T">Takashi Tsuboi</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yasuhiro Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Nakatsubo%2C+D">Daisuke Nakatsubo</a>, <a href="/search/cs?searchtype=author&amp;query=Maesawa%2C+S">Satoshi Maesawa</a>, <a href="/search/cs?searchtype=author&amp;query=Saito%2C+R">Ryuta Saito</a>, <a href="/search/cs?searchtype=author&amp;query=Katsuno%2C+M">Masahisa Katsuno</a>, <a href="/search/cs?searchtype=author&amp;query=Kudo%2C+H">Hiroaki Kudo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.12279v1-abstract-short" style="display: inline;"> The potential of deep learning in clinical speech processing is immense, yet the hurdles of limited and imbalanced clinical data samples loom large. This article addresses these challenges by showcasing the utilization of automatic speech recognition and self-supervised learning representations, pre-trained on extensive datasets of normal speech. This innovative approach aims to estimate voice qua&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12279v1-abstract-full').style.display = 'inline'; document.getElementById('2408.12279v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.12279v1-abstract-full" style="display: none;"> The potential of deep learning in clinical speech processing is immense, yet the hurdles of limited and imbalanced clinical data samples loom large. This article addresses these challenges by showcasing the utilization of automatic speech recognition and self-supervised learning representations, pre-trained on extensive datasets of normal speech. This innovative approach aims to estimate voice quality of patients with impaired vocal systems. Experiments involve checks on PVQD dataset, covering various causes of vocal system damage in English, and a Japanese dataset focusing on patients with Parkinson&#39;s disease before and after undergoing subthalamic nucleus deep brain stimulation (STN-DBS) surgery. The results on PVQD reveal a notable correlation (&gt;0.8 on PCC) and an extraordinary accuracy (&lt;0.5 on MSE) in predicting Grade, Breathy, and Asthenic indicators. Meanwhile, progress has been achieved in predicting the voice quality of patients in the context of STN-DBS. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12279v1-abstract-full').style.display = 'none'; document.getElementById('2408.12279v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by Interspeech 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.02246">arXiv:2408.02246</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.02246">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> </div> <p class="title is-5 mathjax"> AMIDER: A Multidisciplinary Research Database and Its Application to Promote Open Science </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kozai%2C+M">Masayoshi Kozai</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yoshimasa Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Abe%2C+S">Shuji Abe</a>, <a href="/search/cs?searchtype=author&amp;query=Minamiyama%2C+Y">Yasuyuki Minamiyama</a>, <a href="/search/cs?searchtype=author&amp;query=Shinbori%2C+A">Atsuki Shinbori</a>, <a href="/search/cs?searchtype=author&amp;query=Kadokura%2C+A">Akira Kadokura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.02246v2-abstract-short" style="display: inline;"> The AMIDER, Advanced Multidisciplinary Integrated-Database for Exploring new Research, is a newly developed research data catalog to demonstrate an advanced database application. AMIDER is characterized as a multidisciplinary database equipped with a user-friendly web application. Its catalog view displays diverse research data at once beyond any limitation of each individual discipline. Some usef&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.02246v2-abstract-full').style.display = 'inline'; document.getElementById('2408.02246v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.02246v2-abstract-full" style="display: none;"> The AMIDER, Advanced Multidisciplinary Integrated-Database for Exploring new Research, is a newly developed research data catalog to demonstrate an advanced database application. AMIDER is characterized as a multidisciplinary database equipped with a user-friendly web application. Its catalog view displays diverse research data at once beyond any limitation of each individual discipline. Some useful functions, such as a selectable data download, data format conversion, and display of data visual information, are also implemented. Further advanced functions, such as visualization of dataset mutual relationship, are also implemented as a preliminary trial. These characteristics and functions are expected to enhance the accessibility to individual research data, even from non-expertized users, and be helpful for collaborations among diverse scientific fields beyond individual disciplines. Multidisciplinary data management is also one of AMIDER&#39;s uniqueness, where various metadata schemas can be mapped to a uniform metadata table, and standardized and self-describing data formats are adopted. AMIDER website (https://amider.rois.ac.jp/) had been launched in April 2024. As of July 2024, over 15,000 metadata in various research fields of polar science have been registered in the database, and approximately 500 visitors are viewing the website every day on average. Expansion of the database to further multidisciplinary scientific fields, not only polar science, is planned, and advanced attempts, such as applying Natural Language Processing (NLP) to metadata, have also been considered. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.02246v2-abstract-full').style.display = 'none'; document.getElementById('2408.02246v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 4 figures, submitted to Data Science Journal</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.21075">arXiv:2407.21075</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.21075">pdf</a>, <a href="https://arxiv.org/format/2407.21075">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Apple Intelligence Foundation Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gunter%2C+T">Tom Gunter</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zirui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+R">Ruoming Pang</a>, <a href="/search/cs?searchtype=author&amp;query=Narayanan%2C+A">Andy Narayanan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+A">Aonan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+B">Bowen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chiu%2C+C">Chung-Cheng Chiu</a>, <a href="/search/cs?searchtype=author&amp;query=Qiu%2C+D">David Qiu</a>, <a href="/search/cs?searchtype=author&amp;query=Gopinath%2C+D">Deepak Gopinath</a>, <a href="/search/cs?searchtype=author&amp;query=Yap%2C+D+A">Dian Ang Yap</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+D">Dong Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Nan%2C+F">Feng Nan</a>, <a href="/search/cs?searchtype=author&amp;query=Weers%2C+F">Floris Weers</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+G">Guoli Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Haoshuo Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jianyu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jiarui Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Peebles%2C+J">John Peebles</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+K">Ke Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+M">Mark Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+N">Nan Du</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Q">Qibin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Keunebroek%2C+Q">Quentin Keunebroek</a> , et al. (130 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.21075v1-abstract-short" style="display: inline;"> We present foundation language models developed to power Apple Intelligence features, including a ~3 billion parameter model designed to run efficiently on devices and a large server-based language model designed for Private Cloud Compute. These models are designed to perform a wide range of tasks efficiently, accurately, and responsibly. This report describes the model architecture, the data used&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.21075v1-abstract-full').style.display = 'inline'; document.getElementById('2407.21075v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.21075v1-abstract-full" style="display: none;"> We present foundation language models developed to power Apple Intelligence features, including a ~3 billion parameter model designed to run efficiently on devices and a large server-based language model designed for Private Cloud Compute. These models are designed to perform a wide range of tasks efficiently, accurately, and responsibly. This report describes the model architecture, the data used to train the model, the training process, how the models are optimized for inference, and the evaluation results. We highlight our focus on Responsible AI and how the principles are applied throughout the model development. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.21075v1-abstract-full').style.display = 'none'; document.getElementById('2407.21075v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.17881">arXiv:2405.17881</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.17881">pdf</a>, <a href="https://arxiv.org/format/2405.17881">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Crystal-LSBO: Automated Design of De Novo Crystals with Latent Space Bayesian Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Boyar%2C+O">Onur Boyar</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+Y">Yanheng Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuji Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Tonogai%2C+S">Shunsuke Tonogai</a>, <a href="/search/cs?searchtype=author&amp;query=Itakura%2C+T">Tomoya Itakura</a>, <a href="/search/cs?searchtype=author&amp;query=Takeuchi%2C+I">Ichiro Takeuchi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.17881v2-abstract-short" style="display: inline;"> Generative modeling of crystal structures is significantly challenged by the complexity of input data, which constrains the ability of these models to explore and discover novel crystals. This complexity often confines de novo design methodologies to merely small perturbations of known crystals and hampers the effective application of advanced optimization techniques. One such optimization techniq&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.17881v2-abstract-full').style.display = 'inline'; document.getElementById('2405.17881v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.17881v2-abstract-full" style="display: none;"> Generative modeling of crystal structures is significantly challenged by the complexity of input data, which constrains the ability of these models to explore and discover novel crystals. This complexity often confines de novo design methodologies to merely small perturbations of known crystals and hampers the effective application of advanced optimization techniques. One such optimization technique, Latent Space Bayesian Optimization (LSBO) has demonstrated promising results in uncovering novel objects across various domains, especially when combined with Variational Autoencoders (VAEs). Recognizing LSBO&#39;s potential and the critical need for innovative crystal discovery, we introduce Crystal-LSBO, a de novo design framework for crystals specifically tailored to enhance explorability within LSBO frameworks. Crystal-LSBO employs multiple VAEs, each dedicated to a distinct aspect of crystal structure: lattice, coordinates, and chemical elements, orchestrated by an integrative model that synthesizes these components into a cohesive output. This setup not only streamlines the learning process but also produces explorable latent spaces thanks to the decreased complexity of the learning task for each model, enabling LSBO approaches to operate. Our study pioneers the use of LSBO for de novo crystal design, demonstrating its efficacy through optimization tasks focused mainly on formation energy values. Our results highlight the effectiveness of our methodology, offering a new perspective for de novo crystal discovery. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.17881v2-abstract-full').style.display = 'none'; document.getElementById('2405.17881v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.03998">arXiv:2404.03998</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.03998">pdf</a>, <a href="https://arxiv.org/format/2404.03998">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Physics-Inspired Synthesized Underwater Image Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kaneko%2C+R">Reina Kaneko</a>, <a href="/search/cs?searchtype=author&amp;query=Higashi%2C+H">Hiroshi Higashi</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.03998v1-abstract-short" style="display: inline;"> This paper introduces the physics-inspired synthesized underwater image dataset (PHISWID), a dataset tailored for enhancing underwater image processing through physics-inspired image synthesis. Deep learning approaches to underwater image enhancement typically demand extensive datasets, yet acquiring paired clean and degraded underwater ones poses significant challenges. While several underwater i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.03998v1-abstract-full').style.display = 'inline'; document.getElementById('2404.03998v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.03998v1-abstract-full" style="display: none;"> This paper introduces the physics-inspired synthesized underwater image dataset (PHISWID), a dataset tailored for enhancing underwater image processing through physics-inspired image synthesis. Deep learning approaches to underwater image enhancement typically demand extensive datasets, yet acquiring paired clean and degraded underwater ones poses significant challenges. While several underwater image datasets have been proposed using physics-based synthesis, a publicly accessible collection has been lacking. Additionally, most underwater image synthesis approaches do not intend to reproduce atmospheric scenes, resulting in incomplete enhancement. PHISWID addresses this gap by offering a set of paired ground-truth (atmospheric) and synthetically degraded underwater images, showcasing not only color degradation but also the often-neglected effects of marine snow, a composite of organic matter and sand particles that considerably impairs underwater image clarity. The dataset applies these degradations to atmospheric RGB-D images, enhancing the dataset&#39;s realism and applicability. PHISWID is particularly valuable for training deep neural networks in a supervised learning setting and for objectively assessing image quality in benchmark analyses. Our results reveal that even a basic U-Net architecture, when trained with PHISWID, substantially outperforms existing methods in underwater image enhancement. We intend to release PHISWID publicly, contributing a significant resource to the advancement of underwater imaging technology. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.03998v1-abstract-full').style.display = 'none'; document.getElementById('2404.03998v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.09018">arXiv:2402.09018</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.09018">pdf</a>, <a href="https://arxiv.org/format/2402.09018">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Neural Operators Meet Energy-based Theory: Operator Learning for Hamiltonian and Dissipative PDEs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Yaguchi%2C+T">Takaharu Yaguchi</a>, <a href="/search/cs?searchtype=author&amp;query=Iwata%2C+T">Tomoharu Iwata</a>, <a href="/search/cs?searchtype=author&amp;query=Ueda%2C+N">Naonori Ueda</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.09018v1-abstract-short" style="display: inline;"> The operator learning has received significant attention in recent years, with the aim of learning a mapping between function spaces. Prior works have proposed deep neural networks (DNNs) for learning such a mapping, enabling the learning of solution operators of partial differential equations (PDEs). However, these works still struggle to learn dynamics that obeys the laws of physics. This paper&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.09018v1-abstract-full').style.display = 'inline'; document.getElementById('2402.09018v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.09018v1-abstract-full" style="display: none;"> The operator learning has received significant attention in recent years, with the aim of learning a mapping between function spaces. Prior works have proposed deep neural networks (DNNs) for learning such a mapping, enabling the learning of solution operators of partial differential equations (PDEs). However, these works still struggle to learn dynamics that obeys the laws of physics. This paper proposes Energy-consistent Neural Operators (ENOs), a general framework for learning solution operators of PDEs that follows the energy conservation or dissipation law from observed solution trajectories. We introduce a novel penalty function inspired by the energy-based theory of physics for training, in which the energy functional is modeled by another DNN, allowing one to bias the outputs of the DNN-based solution operators to ensure energetic consistency without explicit PDEs. Experiments on multiple physical systems show that ENO outperforms existing DNN models in predicting solutions from data, especially in super-resolution settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.09018v1-abstract-full').style.display = 'none'; document.getElementById('2402.09018v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.16719">arXiv:2401.16719</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.16719">pdf</a>, <a href="https://arxiv.org/format/2401.16719">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> OptiState: State Estimation of Legged Robots using Gated Networks with Transformer-based Vision and Kalman Filtering </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Schperberg%2C+A">Alexander Schperberg</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Mowlavi%2C+S">Saviz Mowlavi</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+F">Feng Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Balaji%2C+B">Bharathan Balaji</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.16719v3-abstract-short" style="display: inline;"> State estimation for legged robots is challenging due to their highly dynamic motion and limitations imposed by sensor accuracy. By integrating Kalman filtering, optimization, and learning-based modalities, we propose a hybrid solution that combines proprioception and exteroceptive information for estimating the state of the robot&#39;s trunk. Leveraging joint encoder and IMU measurements, our Kalman&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.16719v3-abstract-full').style.display = 'inline'; document.getElementById('2401.16719v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.16719v3-abstract-full" style="display: none;"> State estimation for legged robots is challenging due to their highly dynamic motion and limitations imposed by sensor accuracy. By integrating Kalman filtering, optimization, and learning-based modalities, we propose a hybrid solution that combines proprioception and exteroceptive information for estimating the state of the robot&#39;s trunk. Leveraging joint encoder and IMU measurements, our Kalman filter is enhanced through a single-rigid body model that incorporates ground reaction force control outputs from convex Model Predictive Control optimization. The estimation is further refined through Gated Recurrent Units, which also considers semantic insights and robot height from a Vision Transformer autoencoder applied on depth images. This framework not only furnishes accurate robot state estimates, including uncertainty evaluations, but can minimize the nonlinear errors that arise from sensor measurements and model simplifications through learning. The proposed methodology is evaluated in hardware using a quadruped robot on various terrains, yielding a 65% improvement on the Root Mean Squared Error compared to our VIO SLAM baseline. Code example: https://github.com/AlexS28/OptiState <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.16719v3-abstract-full').style.display = 'none'; document.getElementById('2401.16719v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to the 2024 IEEE International Conference on Robotics and Automation (ICRA), May 13-17, in Yokohama, Japan. 7 pages, 5 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.15846">arXiv:2401.15846</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.15846">pdf</a>, <a href="https://arxiv.org/format/2401.15846">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Meta-Learning for Neural Network-based Temporal Point Processes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Takimoto%2C+Y">Yoshiaki Takimoto</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Iwata%2C+T">Tomoharu Iwata</a>, <a href="/search/cs?searchtype=author&amp;query=Okawa%2C+M">Maya Okawa</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+H">Hideaki Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Toda%2C+H">Hiroyuki Toda</a>, <a href="/search/cs?searchtype=author&amp;query=Kurashima%2C+T">Takeshi Kurashima</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.15846v1-abstract-short" style="display: inline;"> Human activities generate various event sequences such as taxi trip records, bike-sharing pick-ups, crime occurrence, and infectious disease transmission. The point process is widely used in many applications to predict such events related to human activities. However, point processes present two problems in predicting events related to human activities. First, recent high-performance point proces&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.15846v1-abstract-full').style.display = 'inline'; document.getElementById('2401.15846v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.15846v1-abstract-full" style="display: none;"> Human activities generate various event sequences such as taxi trip records, bike-sharing pick-ups, crime occurrence, and infectious disease transmission. The point process is widely used in many applications to predict such events related to human activities. However, point processes present two problems in predicting events related to human activities. First, recent high-performance point process models require the input of sufficient numbers of events collected over a long period (i.e., long sequences) for training, which are often unavailable in realistic situations. Second, the long-term predictions required in real-world applications are difficult. To tackle these problems, we propose a novel meta-learning approach for periodicity-aware prediction of future events given short sequences. The proposed method first embeds short sequences into hidden representations (i.e., task representations) via recurrent neural networks for creating predictions from short sequences. It then models the intensity of the point process by monotonic neural networks (MNNs), with the input being the task representations. We transfer the prior knowledge learned from related tasks and can improve event prediction given short sequences of target tasks. We design the MNNs to explicitly take temporal periodic patterns into account, contributing to improved long-term prediction performance. Experiments on multiple real-world datasets demonstrate that the proposed method has higher prediction performance than existing alternatives. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.15846v1-abstract-full').style.display = 'none'; document.getElementById('2401.15846v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.08245">arXiv:2401.08245</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.08245">pdf</a>, <a href="https://arxiv.org/format/2401.08245">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Optimizing $k$ in $k$NN Graphs with Graph Learning Perspective </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tamaru%2C+A">Asuka Tamaru</a>, <a href="/search/cs?searchtype=author&amp;query=Hara%2C+J">Junya Hara</a>, <a href="/search/cs?searchtype=author&amp;query=Higashi%2C+H">Hiroshi Higashi</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Ortega%2C+A">Antonio Ortega</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.08245v1-abstract-short" style="display: inline;"> In this paper, we propose a method, based on graph signal processing, to optimize the choice of $k$ in $k$-nearest neighbor graphs ($k$NNGs). $k$NN is one of the most popular approaches and is widely used in machine learning and signal processing. The parameter $k$ represents the number of neighbors that are connected to the target node; however, its appropriate selection is still a challenging pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.08245v1-abstract-full').style.display = 'inline'; document.getElementById('2401.08245v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.08245v1-abstract-full" style="display: none;"> In this paper, we propose a method, based on graph signal processing, to optimize the choice of $k$ in $k$-nearest neighbor graphs ($k$NNGs). $k$NN is one of the most popular approaches and is widely used in machine learning and signal processing. The parameter $k$ represents the number of neighbors that are connected to the target node; however, its appropriate selection is still a challenging problem. Therefore, most $k$NNGs use ad hoc selection methods for $k$. In the proposed method, we assume that a different $k$ can be chosen for each node. We formulate a discrete optimization problem to seek the best $k$ with a constraint on the sum of distances of the connected nodes. The optimal $k$ values are efficiently obtained without solving a complex optimization. Furthermore, we reveal that the proposed method is closely related to existing graph learning methods. In experiments on real datasets, we demonstrate that the $k$NNGs obtained with our method are sparse and can determine an appropriate variable number of edges per node. We validate the effectiveness of the proposed method for point cloud denoising, comparing our denoising performance with achievable graph construction methods that can be scaled to typical point cloud sizes (e.g., thousands of nodes). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.08245v1-abstract-full').style.display = 'none'; document.getElementById('2401.08245v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.04856">arXiv:2312.04856</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.04856">pdf</a>, <a href="https://arxiv.org/format/2312.04856">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> SCALER: Versatile Multi-Limbed Robot for Free-Climbing in Extreme Terrains </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Shirai%2C+Y">Yuki Shirai</a>, <a href="/search/cs?searchtype=author&amp;query=Schperberg%2C+A">Alexander Schperberg</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+X">Xuan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.04856v2-abstract-short" style="display: inline;"> This paper presents SCALER, a versatile free-climbing multi-limbed robot that is designed to achieve tightly coupled simultaneous locomotion and dexterous grasping. Although existing quadruped-limbed robots have shown impressive dexterous skills such as object manipulation, it is essential to balance power-intensive locomotion and dexterous grasping capabilities. We design a torso linkage and a pa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.04856v2-abstract-full').style.display = 'inline'; document.getElementById('2312.04856v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.04856v2-abstract-full" style="display: none;"> This paper presents SCALER, a versatile free-climbing multi-limbed robot that is designed to achieve tightly coupled simultaneous locomotion and dexterous grasping. Although existing quadruped-limbed robots have shown impressive dexterous skills such as object manipulation, it is essential to balance power-intensive locomotion and dexterous grasping capabilities. We design a torso linkage and a parallel-serial limb to meet such conflicting skills that pose unique challenges in the hardware designs. SCALER employs underactuated two-fingered GOAT grippers that can mechanically adapt and offer 7 modes of grasping, enabling SCALER to traverse extreme terrains with multi-modal grasping strategies. We study the whole-body approach, where SCALER uses its body and limbs to generate additional forces for stable grasping with environments, further enhancing versatility. Furthermore, we improve the GOAT gripper actuation speed to realize more dynamic climbing in a closed-loop control fashion. With these proposed technologies, SCALER can traverse vertical, overhang, upside-down, slippery terrains, and bouldering walls with non-convex-shaped climbing holds under the Earth&#39;s gravity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.04856v2-abstract-full').style.display = 'none'; document.getElementById('2312.04856v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.15856">arXiv:2310.15856</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.15856">pdf</a>, <a href="https://arxiv.org/ps/2310.15856">ps</a>, <a href="https://arxiv.org/format/2310.15856">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Group Theory">math.GR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Number Theory">math.NT</span> </div> </div> <p class="title is-5 mathjax"> A criterion for determining whether multiple shells support a $t$-design </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Awada%2C+M">Madoka Awada</a>, <a href="/search/cs?searchtype=author&amp;query=Ishikawa%2C+R">Reina Ishikawa</a>, <a href="/search/cs?searchtype=author&amp;query=Miezaki%2C+T">Tsuyoshi Miezaki</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuuho Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.15856v2-abstract-short" style="display: inline;"> In this paper, we provide a criterion for determining whether multiple shells support a $t$-design. We construct as a corollary an infinite series of $2$-designs using power residue codes. </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.15856v2-abstract-full" style="display: none;"> In this paper, we provide a criterion for determining whether multiple shells support a $t$-design. We construct as a corollary an infinite series of $2$-designs using power residue codes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.15856v2-abstract-full').style.display = 'none'; document.getElementById('2310.15856v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages. arXiv admin note: substantial text overlap with arXiv:2309.03206, arXiv:2305.03285, arXiv:2310.14281</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> Primary 94B05; Secondary 05B05 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.13270">arXiv:2310.13270</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.13270">pdf</a>, <a href="https://arxiv.org/format/2310.13270">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Meta-learning of Physics-informed Neural Networks for Efficiently Solving Newly Given PDEs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iwata%2C+T">Tomoharu Iwata</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Ueda%2C+N">Naonori Ueda</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.13270v1-abstract-short" style="display: inline;"> We propose a neural network-based meta-learning method to efficiently solve partial differential equation (PDE) problems. The proposed method is designed to meta-learn how to solve a wide variety of PDE problems, and uses the knowledge for solving newly given PDE problems. We encode a PDE problem into a problem representation using neural networks, where governing equations are represented by coef&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.13270v1-abstract-full').style.display = 'inline'; document.getElementById('2310.13270v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.13270v1-abstract-full" style="display: none;"> We propose a neural network-based meta-learning method to efficiently solve partial differential equation (PDE) problems. The proposed method is designed to meta-learn how to solve a wide variety of PDE problems, and uses the knowledge for solving newly given PDE problems. We encode a PDE problem into a problem representation using neural networks, where governing equations are represented by coefficients of a polynomial function of partial derivatives, and boundary conditions are represented by a set of point-condition pairs. We use the problem representation as an input of a neural network for predicting solutions, which enables us to efficiently predict problem-specific solutions by the forwarding process of the neural network without updating model parameters. To train our model, we minimize the expected error when adapted to a PDE problem based on the physics-informed neural network framework, by which we can evaluate the error even when solutions are unknown. We demonstrate that our proposed method outperforms existing methods in predicting solutions of PDE problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.13270v1-abstract-full').style.display = 'none'; document.getElementById('2310.13270v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.12650">arXiv:2310.12650</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.12650">pdf</a>, <a href="https://arxiv.org/format/2310.12650">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Hibikino-Musashi@Home 2023 Team Description Paper </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shiba%2C+T">Tomoya Shiba</a>, <a href="/search/cs?searchtype=author&amp;query=Mizutani%2C+A">Akinobu Mizutani</a>, <a href="/search/cs?searchtype=author&amp;query=Yano%2C+Y">Yuga Yano</a>, <a href="/search/cs?searchtype=author&amp;query=Ono%2C+T">Tomohiro Ono</a>, <a href="/search/cs?searchtype=author&amp;query=Tokuno%2C+S">Shoshi Tokuno</a>, <a href="/search/cs?searchtype=author&amp;query=Kanaoka%2C+D">Daiju Kanaoka</a>, <a href="/search/cs?searchtype=author&amp;query=Fukuda%2C+Y">Yukiya Fukuda</a>, <a href="/search/cs?searchtype=author&amp;query=Amano%2C+H">Hayato Amano</a>, <a href="/search/cs?searchtype=author&amp;query=Koresawa%2C+M">Mayu Koresawa</a>, <a href="/search/cs?searchtype=author&amp;query=Sakai%2C+Y">Yoshifumi Sakai</a>, <a href="/search/cs?searchtype=author&amp;query=Takemoto%2C+R">Ryogo Takemoto</a>, <a href="/search/cs?searchtype=author&amp;query=Tamai%2C+K">Katsunori Tamai</a>, <a href="/search/cs?searchtype=author&amp;query=Nakahara%2C+K">Kazuo Nakahara</a>, <a href="/search/cs?searchtype=author&amp;query=Hayashi%2C+H">Hiroyuki Hayashi</a>, <a href="/search/cs?searchtype=author&amp;query=Fujimatsu%2C+S">Satsuki Fujimatsu</a>, <a href="/search/cs?searchtype=author&amp;query=Mizoguchi%2C+Y">Yusuke Mizoguchi</a>, <a href="/search/cs?searchtype=author&amp;query=Anraku%2C+M">Moeno Anraku</a>, <a href="/search/cs?searchtype=author&amp;query=Suzuka%2C+M">Mayo Suzuka</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+L">Lu Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Maeda%2C+K">Kohei Maeda</a>, <a href="/search/cs?searchtype=author&amp;query=Matsuzaki%2C+F">Fumiya Matsuzaki</a>, <a href="/search/cs?searchtype=author&amp;query=Matsumoto%2C+I">Ikuya Matsumoto</a>, <a href="/search/cs?searchtype=author&amp;query=Murai%2C+K">Kazuya Murai</a>, <a href="/search/cs?searchtype=author&amp;query=Isomoto%2C+K">Kosei Isomoto</a>, <a href="/search/cs?searchtype=author&amp;query=Minje%2C+K">Kim Minje</a> , et al. (3 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.12650v1-abstract-short" style="display: inline;"> This paper describes an overview of the techniques of Hibikino-Musashi@Home, which intends to participate in the domestic standard platform league. The team has developed a dataset generator for the training of a robot vision system and an open-source development environment running on a human support robot simulator. The robot system comprises self-developed libraries including those for motion s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.12650v1-abstract-full').style.display = 'inline'; document.getElementById('2310.12650v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.12650v1-abstract-full" style="display: none;"> This paper describes an overview of the techniques of Hibikino-Musashi@Home, which intends to participate in the domestic standard platform league. The team has developed a dataset generator for the training of a robot vision system and an open-source development environment running on a human support robot simulator. The robot system comprises self-developed libraries including those for motion synthesis and open-source software works on the robot operating system. The team aims to realize a home service robot that assists humans in a home, and continuously attend the competition to evaluate the developed system. The brain-inspired artificial intelligence system is also proposed for service robots which are expected to work in a real home environment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.12650v1-abstract-full').style.display = 'none'; document.getElementById('2310.12650v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.06379">arXiv:2310.06379</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.06379">pdf</a>, <a href="https://arxiv.org/format/2310.06379">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Understanding the Expressivity and Trainability of Fourier Neural Operator: A Mean-Field Perspective </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Koshizuka%2C+T">Takeshi Koshizuka</a>, <a href="/search/cs?searchtype=author&amp;query=Fujisawa%2C+M">Masahiro Fujisawa</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Sato%2C+I">Issei Sato</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.06379v3-abstract-short" style="display: inline;"> In this paper, we explores the expressivity and trainability of the Fourier Neural Operator (FNO). We establish a mean-field theory for the FNO, analyzing the behavior of the random FNO from an edge of chaos perspective. Our investigation into the expressivity of a random FNO involves examining the ordered-chaos phase transition of the network based on the weight distribution. This phase transitio&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.06379v3-abstract-full').style.display = 'inline'; document.getElementById('2310.06379v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.06379v3-abstract-full" style="display: none;"> In this paper, we explores the expressivity and trainability of the Fourier Neural Operator (FNO). We establish a mean-field theory for the FNO, analyzing the behavior of the random FNO from an edge of chaos perspective. Our investigation into the expressivity of a random FNO involves examining the ordered-chaos phase transition of the network based on the weight distribution. This phase transition demonstrates characteristics unique to the FNO, induced by mode truncation, while also showcasing similarities to those of densely connected networks. Furthermore, we identify a connection between expressivity and trainability: the ordered and chaotic phases correspond to regions of vanishing and exploding gradients, respectively. This finding provides a practical prerequisite for the stable training of the FNO. Our experimental results corroborate our theoretical findings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.06379v3-abstract-full').style.display = 'none'; document.getElementById('2310.06379v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.16729">arXiv:2305.16729</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.16729">pdf</a>, <a href="https://arxiv.org/format/2305.16729">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Chaotic Dynamics">nlin.CD</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.14495/jsiaml.15.117">10.14495/jsiaml.15.117 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Evaluating generation of chaotic time series by convolutional generative adversarial networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuki Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Yamaguti%2C+Y">Yutaka Yamaguti</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.16729v2-abstract-short" style="display: inline;"> To understand the ability and limitations of convolutional neural networks to generate time series that mimic complex temporal signals, we trained a generative adversarial network consisting of deep convolutional networks to generate chaotic time series and used nonlinear time series analysis to evaluate the generated time series. A numerical measure of determinism and the Lyapunov exponent, a mea&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.16729v2-abstract-full').style.display = 'inline'; document.getElementById('2305.16729v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.16729v2-abstract-full" style="display: none;"> To understand the ability and limitations of convolutional neural networks to generate time series that mimic complex temporal signals, we trained a generative adversarial network consisting of deep convolutional networks to generate chaotic time series and used nonlinear time series analysis to evaluate the generated time series. A numerical measure of determinism and the Lyapunov exponent, a measure of trajectory instability, showed that the generated time series well reproduce the chaotic properties of the original time series. However, error distribution analyses showed that large errors appeared at a low but non-negligible rate. Such errors would not be expected if the distribution were assumed to be exponential. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.16729v2-abstract-full').style.display = 'none'; document.getElementById('2305.16729v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> JSIAM Letters, 15 (2023), 117-120 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.06576">arXiv:2305.06576</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.06576">pdf</a>, <a href="https://arxiv.org/format/2305.06576">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Clustering of Time-Varying Graphs Based on Temporal Label Smoothness </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Fukumoto%2C+K">Katsuki Fukumoto</a>, <a href="/search/cs?searchtype=author&amp;query=Yamada%2C+K">Koki Yamada</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Wai%2C+H">Hoi-To Wai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.06576v1-abstract-short" style="display: inline;"> We propose a node clustering method for time-varying graphs based on the assumption that the cluster labels are changed smoothly over time. Clustering is one of the fundamental tasks in many science and engineering fields including signal processing, machine learning, and data mining. Although most existing studies focus on the clustering of nodes in static graphs, we often encounter time-varying&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06576v1-abstract-full').style.display = 'inline'; document.getElementById('2305.06576v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.06576v1-abstract-full" style="display: none;"> We propose a node clustering method for time-varying graphs based on the assumption that the cluster labels are changed smoothly over time. Clustering is one of the fundamental tasks in many science and engineering fields including signal processing, machine learning, and data mining. Although most existing studies focus on the clustering of nodes in static graphs, we often encounter time-varying graphs for time-series data, e.g., social networks, brain functional connectivity, and point clouds. In this paper, we formulate a node clustering of time-varying graphs as an optimization problem based on spectral clustering, with a smoothness constraint of the node labels. We solve the problem with a primal-dual splitting algorithm. Experiments on synthetic and real-world time-varying graphs are performed to validate the effectiveness of the proposed approach. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06576v1-abstract-full').style.display = 'none'; document.getElementById('2305.06576v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.02538">arXiv:2305.02538</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.02538">pdf</a>, <a href="https://arxiv.org/format/2305.02538">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Cuttlefish: Low-Rank Model Training without All the Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongyi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Agarwal%2C+S">Saurabh Agarwal</a>, <a href="/search/cs?searchtype=author&amp;query=U-chupala%2C+P">Pongsakorn U-chupala</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yoshiki Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Xing%2C+E+P">Eric P. Xing</a>, <a href="/search/cs?searchtype=author&amp;query=Papailiopoulos%2C+D">Dimitris Papailiopoulos</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.02538v2-abstract-short" style="display: inline;"> Recent research has shown that training low-rank neural networks can effectively reduce the total number of trainable parameters without sacrificing predictive accuracy, resulting in end-to-end speedups. However, low-rank model training necessitates adjusting several additional factorization hyperparameters, such as the rank of the factorization at each layer. In this paper, we tackle this challen&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.02538v2-abstract-full').style.display = 'inline'; document.getElementById('2305.02538v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.02538v2-abstract-full" style="display: none;"> Recent research has shown that training low-rank neural networks can effectively reduce the total number of trainable parameters without sacrificing predictive accuracy, resulting in end-to-end speedups. However, low-rank model training necessitates adjusting several additional factorization hyperparameters, such as the rank of the factorization at each layer. In this paper, we tackle this challenge by introducing Cuttlefish, an automated low-rank training approach that eliminates the need for tuning factorization hyperparameters. Cuttlefish leverages the observation that after a few epochs of full-rank training, the stable rank (i.e., an approximation of the true rank) of each layer stabilizes at a constant value. Cuttlefish switches from full-rank to low-rank training once the stable ranks of all layers have converged, setting the dimension of each factorization to its corresponding stable rank. Our results show that Cuttlefish generates models up to 5.6 times smaller than full-rank models, and attains up to a 1.2 times faster end-to-end training process while preserving comparable accuracy. Moreover, Cuttlefish outperforms state-of-the-art low-rank model training methods and other prominent baselines. The source code for our implementation can be found at: https://github.com/hwang595/Cuttlefish. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.02538v2-abstract-full').style.display = 'none'; document.getElementById('2305.02538v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for presentation at MLSys 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.00944">arXiv:2303.00944</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.00944">pdf</a>, <a href="https://arxiv.org/format/2303.00944">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Attention-based Graph Convolution Fusing Latent Structures and Multiple Features for Graph Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.00944v2-abstract-short" style="display: inline;"> We present an attention-based spatial graph convolution (AGC) for graph neural networks (GNNs). Existing AGCs focus on only using node-wise features and utilizing one type of attention function when calculating attention weights. Instead, we propose two methods to improve the representational power of AGCs by utilizing 1) structural information in a high-dimensional space and 2) multiple attention&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.00944v2-abstract-full').style.display = 'inline'; document.getElementById('2303.00944v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.00944v2-abstract-full" style="display: none;"> We present an attention-based spatial graph convolution (AGC) for graph neural networks (GNNs). Existing AGCs focus on only using node-wise features and utilizing one type of attention function when calculating attention weights. Instead, we propose two methods to improve the representational power of AGCs by utilizing 1) structural information in a high-dimensional space and 2) multiple attention functions when calculating their weights. The first method computes a local structure representation of a graph in a high-dimensional space. The second method utilizes multiple attention functions simultaneously in one AGC. Both approaches can be combined. We also propose a GNN for the classification of point clouds and that for the prediction of point labels in a point cloud based on the proposed AGC. According to experiments, the proposed GNNs perform better than existing methods. Our codes open at https://github.com/liyang-tuat/SFAGC. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.00944v2-abstract-full').style.display = 'none'; document.getElementById('2303.00944v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.06696">arXiv:2211.06696</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.06696">pdf</a>, <a href="https://arxiv.org/format/2211.06696">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Hibikino-Musashi@Home 2022 Team Description Paper </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shiba%2C+T">Tomoya Shiba</a>, <a href="/search/cs?searchtype=author&amp;query=Ono%2C+T">Tomohiro Ono</a>, <a href="/search/cs?searchtype=author&amp;query=Tokuno%2C+S">Shoshi Tokuno</a>, <a href="/search/cs?searchtype=author&amp;query=Uchino%2C+I">Issei Uchino</a>, <a href="/search/cs?searchtype=author&amp;query=Okamoto%2C+M">Masaya Okamoto</a>, <a href="/search/cs?searchtype=author&amp;query=Kanaoka%2C+D">Daiju Kanaoka</a>, <a href="/search/cs?searchtype=author&amp;query=Takahashi%2C+K">Kazutaka Takahashi</a>, <a href="/search/cs?searchtype=author&amp;query=Tsukamoto%2C+K">Kenta Tsukamoto</a>, <a href="/search/cs?searchtype=author&amp;query=Tsutsumi%2C+Y">Yoshiaki Tsutsumi</a>, <a href="/search/cs?searchtype=author&amp;query=Nakamura%2C+Y">Yugo Nakamura</a>, <a href="/search/cs?searchtype=author&amp;query=Fukuda%2C+Y">Yukiya Fukuda</a>, <a href="/search/cs?searchtype=author&amp;query=Hoji%2C+Y">Yusuke Hoji</a>, <a href="/search/cs?searchtype=author&amp;query=Amano%2C+H">Hayato Amano</a>, <a href="/search/cs?searchtype=author&amp;query=Kubota%2C+Y">Yuma Kubota</a>, <a href="/search/cs?searchtype=author&amp;query=Koresawa%2C+M">Mayu Koresawa</a>, <a href="/search/cs?searchtype=author&amp;query=Sakai%2C+Y">Yoshifumi Sakai</a>, <a href="/search/cs?searchtype=author&amp;query=Takemoto%2C+R">Ryogo Takemoto</a>, <a href="/search/cs?searchtype=author&amp;query=Tamai%2C+K">Katsunori Tamai</a>, <a href="/search/cs?searchtype=author&amp;query=Nakahara%2C+K">Kazuo Nakahara</a>, <a href="/search/cs?searchtype=author&amp;query=Hayashi%2C+H">Hiroyuki Hayashi</a>, <a href="/search/cs?searchtype=author&amp;query=Fujimatsu%2C+S">Satsuki Fujimatsu</a>, <a href="/search/cs?searchtype=author&amp;query=Mizutani%2C+A">Akinobu Mizutani</a>, <a href="/search/cs?searchtype=author&amp;query=Mizoguchi%2C+Y">Yusuke Mizoguchi</a>, <a href="/search/cs?searchtype=author&amp;query=Yoshimitsu%2C+Y">Yuhei Yoshimitsu</a>, <a href="/search/cs?searchtype=author&amp;query=Suzuka%2C+M">Mayo Suzuka</a> , et al. (5 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.06696v1-abstract-short" style="display: inline;"> Our team, Hibikino-Musashi@Home (HMA), was founded in 2010. It is based in Japan in the Kitakyushu Science and Research Park. Since 2010, we have annually participated in the RoboCup@Home Japan Open competition in the open platform league (OPL).We participated as an open platform league team in the 2017 Nagoya RoboCup competition and as a domestic standard platform league (DSPL) team in the 2017 N&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.06696v1-abstract-full').style.display = 'inline'; document.getElementById('2211.06696v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.06696v1-abstract-full" style="display: none;"> Our team, Hibikino-Musashi@Home (HMA), was founded in 2010. It is based in Japan in the Kitakyushu Science and Research Park. Since 2010, we have annually participated in the RoboCup@Home Japan Open competition in the open platform league (OPL).We participated as an open platform league team in the 2017 Nagoya RoboCup competition and as a domestic standard platform league (DSPL) team in the 2017 Nagoya, 2018 Montreal, 2019 Sydney, and 2021 Worldwide RoboCup competitions.We also participated in theWorld Robot Challenge (WRC) 2018 in the service-robotics category of the partner-robot challenge (real space) and won first place. Currently, we have 27 members from nine different laboratories within the Kyushu Institute of Technology and the university of Kitakyushu. In this paper, we introduce the activities that have been performed by our team and the technologies that we use. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.06696v1-abstract-full').style.display = 'none'; document.getElementById('2211.06696v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2005.14451, arXiv:2006.01233</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.04972">arXiv:2211.04972</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.04972">pdf</a>, <a href="https://arxiv.org/ps/2211.04972">ps</a>, <a href="https://arxiv.org/format/2211.04972">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Hibikino-Musashi@Home 2018 Team Description Paper </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ishida%2C+Y">Yutaro Ishida</a>, <a href="/search/cs?searchtype=author&amp;query=Hori%2C+S">Sansei Hori</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichiro Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Yoshimoto%2C+Y">Yuma Yoshimoto</a>, <a href="/search/cs?searchtype=author&amp;query=Hashimoto%2C+K">Kouhei Hashimoto</a>, <a href="/search/cs?searchtype=author&amp;query=Iwamoto%2C+G">Gouki Iwamoto</a>, <a href="/search/cs?searchtype=author&amp;query=Aratani%2C+Y">Yoshiya Aratani</a>, <a href="/search/cs?searchtype=author&amp;query=Yamashita%2C+K">Kenya Yamashita</a>, <a href="/search/cs?searchtype=author&amp;query=Ishimoto%2C+S">Shinya Ishimoto</a>, <a href="/search/cs?searchtype=author&amp;query=Hitaka%2C+K">Kyosuke Hitaka</a>, <a href="/search/cs?searchtype=author&amp;query=Yamaguchi%2C+F">Fumiaki Yamaguchi</a>, <a href="/search/cs?searchtype=author&amp;query=Miyoshi%2C+R">Ryuhei Miyoshi</a>, <a href="/search/cs?searchtype=author&amp;query=Honda%2C+K">Kentaro Honda</a>, <a href="/search/cs?searchtype=author&amp;query=Abe%2C+Y">Yushi Abe</a>, <a href="/search/cs?searchtype=author&amp;query=Kato%2C+Y">Yoshitaka Kato</a>, <a href="/search/cs?searchtype=author&amp;query=Morie%2C+T">Takashi Morie</a>, <a href="/search/cs?searchtype=author&amp;query=Tamukoh%2C+H">Hakaru Tamukoh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.04972v1-abstract-short" style="display: inline;"> Our team, Hibikino-Musashi@Home (the shortened name is HMA), was founded in 2010. It is based in the Kitakyushu Science and Research Park, Japan. We have participated in the RoboCup@Home Japan open competition open platform league every year since 2010. Moreover, we participated in the RoboCup 2017 Nagoya as open platform league and domestic standard platform league teams. Currently, the Hibikino-&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.04972v1-abstract-full').style.display = 'inline'; document.getElementById('2211.04972v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.04972v1-abstract-full" style="display: none;"> Our team, Hibikino-Musashi@Home (the shortened name is HMA), was founded in 2010. It is based in the Kitakyushu Science and Research Park, Japan. We have participated in the RoboCup@Home Japan open competition open platform league every year since 2010. Moreover, we participated in the RoboCup 2017 Nagoya as open platform league and domestic standard platform league teams. Currently, the Hibikino-Musashi@Home team has 20 members from seven different laboratories based in the Kyushu Institute of Technology. In this paper, we introduce the activities of our team and the technologies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.04972v1-abstract-full').style.display = 'none'; document.getElementById('2211.04972v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 5 figures, RoboCup@Home</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.11223">arXiv:2210.11223</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.11223">pdf</a>, <a href="https://arxiv.org/format/2210.11223">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> A Dialogue Robot System to Improve Credibility in Sightseeing Spot Recommendations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yoshimaru%2C+N">Naoki Yoshimaru</a>, <a href="/search/cs?searchtype=author&amp;query=Masuda%2C+T">Tomohiro Masuda</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+H">Hyejin Hong</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusei Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Okuma%2C+M">Motoharu Okuma</a>, <a href="/search/cs?searchtype=author&amp;query=Matsumoto%2C+N">Nagihiro Matsumoto</a>, <a href="/search/cs?searchtype=author&amp;query=Kusu%2C+K">Kazuma Kusu</a>, <a href="/search/cs?searchtype=author&amp;query=Iio%2C+T">Takamasa Iio</a>, <a href="/search/cs?searchtype=author&amp;query=Hatano%2C+K">Kenji Hatano</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.11223v1-abstract-short" style="display: inline;"> Various studies have been conducted on human-supporting robot systems. These systems have been put to practical use over the years and are now seen in our daily lives. In particular, robots communicating smoothly with people are expected to play an active role in customer service and guidance. In this case, it is essential to determine whether the customer is satisfied with the dialog robot or not&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.11223v1-abstract-full').style.display = 'inline'; document.getElementById('2210.11223v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.11223v1-abstract-full" style="display: none;"> Various studies have been conducted on human-supporting robot systems. These systems have been put to practical use over the years and are now seen in our daily lives. In particular, robots communicating smoothly with people are expected to play an active role in customer service and guidance. In this case, it is essential to determine whether the customer is satisfied with the dialog robot or not. However, it is not easy to satisfy all of the customer&#39;s requests due to the diversity of the customer&#39;s speech. In this study, we developed a dialog mechanism that prevents dialog breakdowns and keeps the customer satisfied by providing multiple scenarios for the robot to take control of the dialog. We tested it in a travel destination recommendation task at a travel agency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.11223v1-abstract-full').style.display = 'none'; document.getElementById('2210.11223v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper is part of the proceedings of the Dialogue Robot Competition 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.10221">arXiv:2210.10221</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.10221">pdf</a>, <a href="https://arxiv.org/format/2210.10221">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ICIP46576.2022.9898014">10.1109/ICIP46576.2022.9898014 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Non-iterative optimization of pseudo-labeling thresholds for training object detection models from multiple datasets </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuki Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Yoshida%2C+S+M">Shuhei M. Yoshida</a>, <a href="/search/cs?searchtype=author&amp;query=Terao%2C+M">Makoto Terao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.10221v1-abstract-short" style="display: inline;"> We propose a non-iterative method to optimize pseudo-labeling thresholds for learning object detection from a collection of low-cost datasets, each of which is annotated for only a subset of all the object classes. A popular approach to this problem is first to train teacher models and then to use their confident predictions as pseudo ground-truth labels when training a student model. To obtain th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.10221v1-abstract-full').style.display = 'inline'; document.getElementById('2210.10221v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.10221v1-abstract-full" style="display: none;"> We propose a non-iterative method to optimize pseudo-labeling thresholds for learning object detection from a collection of low-cost datasets, each of which is annotated for only a subset of all the object classes. A popular approach to this problem is first to train teacher models and then to use their confident predictions as pseudo ground-truth labels when training a student model. To obtain the best result, however, thresholds for prediction confidence must be adjusted. This process typically involves iterative search and repeated training of student models and is time-consuming. Therefore, we develop a method to optimize the thresholds without iterative optimization by maximizing the $F_尾$-score on a validation dataset, which measures the quality of pseudo labels and can be measured without training a student model. We experimentally demonstrate that our proposed method achieves an mAP comparable to that of grid search on the COCO and VOC datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.10221v1-abstract-full').style.display = 'none'; document.getElementById('2210.10221v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICIP2022</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 2022 IEEE International Conference on Image Processing (ICIP), 2022, pp. 1676-1680 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.05784">arXiv:2210.05784</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.05784">pdf</a>, <a href="https://arxiv.org/format/2210.05784">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> REMS: Middleware for Robotics Education and Development </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+A">Ankur Mehta</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.05784v1-abstract-short" style="display: inline;"> This paper introduces REMS, a robotics middleware and control framework that is designed to introduce the Zen of Python to robotics and to improve robotics education and development flow. Although existing middleware can serve hardware abstraction and modularity, setting up environments and learning middleware-specific syntax and procedures are less viable in education. They can curb opportunities&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.05784v1-abstract-full').style.display = 'inline'; document.getElementById('2210.05784v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.05784v1-abstract-full" style="display: none;"> This paper introduces REMS, a robotics middleware and control framework that is designed to introduce the Zen of Python to robotics and to improve robotics education and development flow. Although existing middleware can serve hardware abstraction and modularity, setting up environments and learning middleware-specific syntax and procedures are less viable in education. They can curb opportunities to understand robotics concepts, theories, and algorithms. Robotics is a field of integration; students and developers from various backgrounds will be involved in programming. Establishing Pythonic and object-oriented robotic framework in a natural way can enhance modular and abstracted programming for better readability, reusability, and simplicity, but also supports useful and practical skills generally in coding. REMS is to be a valuable robot educational medium not just as a tool and to be a platform from one robot to multi-agent across hardware, simulation, and analytical model implementations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.05784v1-abstract-full').style.display = 'none'; document.getElementById('2210.05784v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submission to ICRA2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.03210">arXiv:2209.03210</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.03210">pdf</a>, <a href="https://arxiv.org/format/2209.03210">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Real-to-Sim: Predicting Residual Errors of Robotic Systems with Sparse Data using a Learning-based Unscented Kalman Filter </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Schperberg%2C+A">Alexander Schperberg</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+F">Feng Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Menner%2C+M">Marcel Menner</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.03210v3-abstract-short" style="display: inline;"> Achieving highly accurate dynamic or simulator models that are close to the real robot can facilitate model-based controls (e.g., model predictive control or linear-quadradic regulators), model-based trajectory planning (e.g., trajectory optimization), and decrease the amount of learning time necessary for reinforcement learning methods. Thus, the objective of this work is to learn the residual er&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.03210v3-abstract-full').style.display = 'inline'; document.getElementById('2209.03210v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.03210v3-abstract-full" style="display: none;"> Achieving highly accurate dynamic or simulator models that are close to the real robot can facilitate model-based controls (e.g., model predictive control or linear-quadradic regulators), model-based trajectory planning (e.g., trajectory optimization), and decrease the amount of learning time necessary for reinforcement learning methods. Thus, the objective of this work is to learn the residual errors between a dynamic and/or simulator model and the real robot. This is achieved using a neural network, where the parameters of a neural network are updated through an Unscented Kalman Filter (UKF) formulation. Using this method, we model these residual errors with only small amounts of data -- a necessity as we improve the simulator/dynamic model by learning directly from real-world operation. We demonstrate our method on robotic hardware (e.g., manipulator arm, and a wheeled robot), and show that with the learned residual errors, we can further close the reality gap between dynamic models, simulations, and actual hardware. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.03210v3-abstract-full').style.display = 'none'; document.getElementById('2209.03210v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to Ubiquitous Robotics 2023, Honolulu, Hawaii</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.13975">arXiv:2208.13975</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2208.13975">pdf</a>, <a href="https://arxiv.org/format/2208.13975">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MRL: Learning to Mix with Attention and Convolutions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mohta%2C+S">Shlok Mohta</a>, <a href="/search/cs?searchtype=author&amp;query=Suganuma%2C+H">Hisahiro Suganuma</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yoshiki Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.13975v1-abstract-short" style="display: inline;"> In this paper, we present a new neural architectural block for the vision domain, named Mixing Regionally and Locally (MRL), developed with the aim of effectively and efficiently mixing the provided input features. We bifurcate the input feature mixing task as mixing at a regional and local scale. To achieve an efficient mix, we exploit the domain-wide receptive field provided by self-attention fo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.13975v1-abstract-full').style.display = 'inline'; document.getElementById('2208.13975v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.13975v1-abstract-full" style="display: none;"> In this paper, we present a new neural architectural block for the vision domain, named Mixing Regionally and Locally (MRL), developed with the aim of effectively and efficiently mixing the provided input features. We bifurcate the input feature mixing task as mixing at a regional and local scale. To achieve an efficient mix, we exploit the domain-wide receptive field provided by self-attention for regional-scale mixing and convolutional kernels restricted to local scale for local-scale mixing. More specifically, our proposed method mixes regional features associated with local features within a defined region, followed by a local-scale features mix augmented by regional features. Experiments show that this hybridization of self-attention and convolution brings improved capacity, generalization (right inductive bias), and efficiency. Under similar network settings, MRL outperforms or is at par with its counterparts in classification, object detection, and segmentation tasks. We also show that our MRL-based network architecture achieves state-of-the-art performance for H&amp;E histology datasets. We achieved DICE of 0.843, 0.855, and 0.892 for Kumar, CoNSep, and CPM-17 datasets, respectively, while highlighting the versatility offered by the MRL framework by incorporating layers like group convolutions to improve dataset-specific generalization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.13975v1-abstract-full').style.display = 'none'; document.getElementById('2208.13975v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.10911">arXiv:2207.10911</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.10911">pdf</a>, <a href="https://arxiv.org/ps/2207.10911">ps</a>, <a href="https://arxiv.org/format/2207.10911">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Group Theory">math.GR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Statistics Theory">math.ST</span> </div> </div> <p class="title is-5 mathjax"> Jacobi polynomials and design theory I </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chakraborty%2C+H+S">Himadri Shekhar Chakraborty</a>, <a href="/search/cs?searchtype=author&amp;query=Miezaki%2C+T">Tsuyoshi Miezaki</a>, <a href="/search/cs?searchtype=author&amp;query=Oura%2C+M">Manabu Oura</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuuho Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.10911v3-abstract-short" style="display: inline;"> In this paper, we introduce the notion of Jacobi polynomials with multiple reference vectors of a code, and give the MacWilliams type identity for it. Moreover, we derive a formula to obtain the Jacobi polynomials using the Aronhold polarization operator. Finally, we describe some facts obtained from Type III and Type IV codes that interpret the relation between the Jacobi polynomials and designs. </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.10911v3-abstract-full" style="display: none;"> In this paper, we introduce the notion of Jacobi polynomials with multiple reference vectors of a code, and give the MacWilliams type identity for it. Moreover, we derive a formula to obtain the Jacobi polynomials using the Aronhold polarization operator. Finally, we describe some facts obtained from Type III and Type IV codes that interpret the relation between the Jacobi polynomials and designs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.10911v3-abstract-full').style.display = 'none'; document.getElementById('2207.10911v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> Primary 11T71; Secondary 94B05; 11F11 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.01418">arXiv:2207.01418</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.01418">pdf</a>, <a href="https://arxiv.org/format/2207.01418">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/IROS47612.2022.9981579">10.1109/IROS47612.2022.9981579 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Simultaneous Contact-Rich Grasping and Locomotion via Distributed Optimization Enabling Free-Climbing for Multi-Limbed Robots </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shirai%2C+Y">Yuki Shirai</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+X">Xuan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Schperberg%2C+A">Alexander Schperberg</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Kato%2C+H">Hayato Kato</a>, <a href="/search/cs?searchtype=author&amp;query=Vichathorn%2C+V">Varit Vichathorn</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.01418v2-abstract-short" style="display: inline;"> While motion planning of locomotion for legged robots has shown great success, motion planning for legged robots with dexterous multi-finger grasping is not mature yet. We present an efficient motion planning framework for simultaneously solving locomotion (e.g., centroidal dynamics), grasping (e.g., patch contact), and contact (e.g., gait) problems. To accelerate the planning process, we propose&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.01418v2-abstract-full').style.display = 'inline'; document.getElementById('2207.01418v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.01418v2-abstract-full" style="display: none;"> While motion planning of locomotion for legged robots has shown great success, motion planning for legged robots with dexterous multi-finger grasping is not mature yet. We present an efficient motion planning framework for simultaneously solving locomotion (e.g., centroidal dynamics), grasping (e.g., patch contact), and contact (e.g., gait) problems. To accelerate the planning process, we propose distributed optimization frameworks based on Alternating Direction Methods of Multipliers (ADMM) to solve the original large-scale Mixed-Integer NonLinear Programming (MINLP). The resulting frameworks use Mixed-Integer Quadratic Programming (MIQP) to solve contact and NonLinear Programming (NLP) to solve nonlinear dynamics, which are more computationally tractable and less sensitive to parameters. Also, we explicitly enforce patch contact constraints from limit surfaces with micro-spine grippers. We demonstrate our proposed framework in the hardware experiments, showing that the multi-limbed robot is able to realize various motions including free-climbing at a slope angle 45掳 with a much shorter planning time. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.01418v2-abstract-full').style.display = 'none'; document.getElementById('2207.01418v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for the 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2022). Hardware implementation videos: https://youtu.be/QLH1shghqQ0</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.01180">arXiv:2207.01180</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.01180">pdf</a>, <a href="https://arxiv.org/format/2207.01180">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> SCALER: A Tough Versatile Quadruped Free-Climber Robot </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Shirai%2C+Y">Yuki Shirai</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+X">Xuan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Schperberg%2C+A">Alexander Schperberg</a>, <a href="/search/cs?searchtype=author&amp;query=Kato%2C+H">Hayato Kato</a>, <a href="/search/cs?searchtype=author&amp;query=Swerdlow%2C+A">Alexander Swerdlow</a>, <a href="/search/cs?searchtype=author&amp;query=Kumagai%2C+N">Naoya Kumagai</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.01180v3-abstract-short" style="display: inline;"> This paper introduces SCALER, a quadrupedal robot that demonstrates climbing on bouldering walls, overhangs, ceilings and trotting on the ground. SCALER is one of the first high-degrees of freedom four-limbed robots that can free-climb under the Earth&#39;s gravity and one of the most mechanically efficient quadrupeds on the ground. Where other state-of-the-art climbers specialize in climbing, SCALER&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.01180v3-abstract-full').style.display = 'inline'; document.getElementById('2207.01180v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.01180v3-abstract-full" style="display: none;"> This paper introduces SCALER, a quadrupedal robot that demonstrates climbing on bouldering walls, overhangs, ceilings and trotting on the ground. SCALER is one of the first high-degrees of freedom four-limbed robots that can free-climb under the Earth&#39;s gravity and one of the most mechanically efficient quadrupeds on the ground. Where other state-of-the-art climbers specialize in climbing, SCALER promises practical free-climbing with payload \textit{and} ground locomotion, which realizes true versatile mobility. A new climbing gait, SKATE gait, increases the payload by utilizing the SCALER body linkage mechanism. SCALER achieves a maximum normalized locomotion speed of $1.87$ /s, or $0.56$ m/s on the ground and $1.0$ /min, or $0.35$ m/min in bouldering wall climbing. Payload capacity reaches $233$ % of the SCALER weight on the ground and $35$ % on the vertical wall. Our GOAT gripper, a mechanically adaptable underactuated two-finger gripper, successfully grasps convex and non-convex objects and supports SCALER. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.01180v3-abstract-full').style.display = 'none'; document.getElementById('2207.01180v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Proceeding to IROS 2022, Preprint and not a final version</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.01033">arXiv:2207.01033</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.01033">pdf</a>, <a href="https://arxiv.org/format/2207.01033">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Adaptive Force Controller for Contact-Rich Robotic Systems using an Unscented Kalman Filter </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Schperberg%2C+A">Alexander Schperberg</a>, <a href="/search/cs?searchtype=author&amp;query=Shirai%2C+Y">Yuki Shirai</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+X">Xuan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.01033v5-abstract-short" style="display: inline;"> In multi-point contact systems, precise force control is crucial for achieving stable and safe interactions between robots and their environment. Thus, we demonstrate an admittance controller with auto-tuning that can be applied for these systems. The controller&#39;s objective is to track the target wrench profiles of each contact point while considering the additional torque due to rotational fricti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.01033v5-abstract-full').style.display = 'inline'; document.getElementById('2207.01033v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.01033v5-abstract-full" style="display: none;"> In multi-point contact systems, precise force control is crucial for achieving stable and safe interactions between robots and their environment. Thus, we demonstrate an admittance controller with auto-tuning that can be applied for these systems. The controller&#39;s objective is to track the target wrench profiles of each contact point while considering the additional torque due to rotational friction. Our admittance controller is adaptive during online operation by using an auto-tuning method that tunes the gains of the controller while following user-specified training objectives. These objectives include facilitating controller stability, such as tracking the wrench profiles as closely as possible, ensuring control outputs are within force limits that minimize slippage, and avoiding configurations that induce kinematic singularity. We demonstrate the robustness of our controller on hardware for both manipulation and locomotion tasks using a multi-limbed climbing robot. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.01033v5-abstract-full').style.display = 'none'; document.getElementById('2207.01033v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to IEEE RAS International Conference on Humanoid Robots 2023, December 12-14 in Austin, Texas, USA</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.12141">arXiv:2206.12141</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.12141">pdf</a>, <a href="https://arxiv.org/format/2206.12141">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Aggregated Multi-output Gaussian Processes with Knowledge Transfer Across Domains </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+T">Toshiyuki Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Iwata%2C+T">Tomoharu Iwata</a>, <a href="/search/cs?searchtype=author&amp;query=Kurashima%2C+T">Takeshi Kurashima</a>, <a href="/search/cs?searchtype=author&amp;query=Okawa%2C+M">Maya Okawa</a>, <a href="/search/cs?searchtype=author&amp;query=Akagi%2C+Y">Yasunori Akagi</a>, <a href="/search/cs?searchtype=author&amp;query=Toda%2C+H">Hiroyuki Toda</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.12141v1-abstract-short" style="display: inline;"> Aggregate data often appear in various fields such as socio-economics and public security. The aggregate data are associated not with points but with supports (e.g., spatial regions in a city). Since the supports may have various granularities depending on attributes (e.g., poverty rate and crime rate), modeling such data is not straightforward. This article offers a multi-output Gaussian process&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.12141v1-abstract-full').style.display = 'inline'; document.getElementById('2206.12141v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.12141v1-abstract-full" style="display: none;"> Aggregate data often appear in various fields such as socio-economics and public security. The aggregate data are associated not with points but with supports (e.g., spatial regions in a city). Since the supports may have various granularities depending on attributes (e.g., poverty rate and crime rate), modeling such data is not straightforward. This article offers a multi-output Gaussian process (MoGP) model that infers functions for attributes using multiple aggregate datasets of respective granularities. In the proposed model, the function for each attribute is assumed to be a dependent GP modeled as a linear mixing of independent latent GPs. We design an observation model with an aggregation process for each attribute; the process is an integral of the GP over the corresponding support. We also introduce a prior distribution of the mixing weights, which allows a knowledge transfer across domains (e.g., cities) by sharing the prior. This is advantageous in such a situation where the spatially aggregated dataset in a city is too coarse to interpolate; the proposed model can still make accurate predictions of attributes by utilizing aggregate datasets in other cities. The inference of the proposed model is based on variational Bayes, which enables one to learn the model parameters using the aggregate datasets from multiple domains. The experiments demonstrate that the proposed model outperforms in the task of refining coarse-grained aggregate data on real-world datasets: Time series of air pollutants in Beijing and various kinds of spatial datasets from New York City and Chicago. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.12141v1-abstract-full').style.display = 'none'; document.getElementById('2206.12141v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work has been submitted to the IEEE for possible publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.12756">arXiv:2205.12756</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.12756">pdf</a>, <a href="https://arxiv.org/format/2205.12756">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Development of a Stereo-Vision Based High-Throughput Robotic System for Mouse Tail Vein Injection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ko%2C+T">Tianyi Ko</a>, <a href="/search/cs?searchtype=author&amp;query=Nishiwaki%2C+K">Koichi Nishiwaki</a>, <a href="/search/cs?searchtype=author&amp;query=Terada%2C+K">Koji Terada</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Mitsumata%2C+S">Shun Mitsumata</a>, <a href="/search/cs?searchtype=author&amp;query=Katagiri%2C+R">Ryuichi Katagiri</a>, <a href="/search/cs?searchtype=author&amp;query=Junko%2C+T">Taketo Junko</a>, <a href="/search/cs?searchtype=author&amp;query=Horiba%2C+N">Naoshi Horiba</a>, <a href="/search/cs?searchtype=author&amp;query=Igata%2C+H">Hideyoshi Igata</a>, <a href="/search/cs?searchtype=author&amp;query=Mizuno%2C+K">Kazue Mizuno</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.12756v1-abstract-short" style="display: inline;"> In this paper, we present a robotic device for mouse tail vein injection. We propose a mouse holding mechanism to realize vein injection without anesthetizing the mouse, which consists of a tourniquet, vacuum port, and adaptive tail-end fixture. The position of the target vein in 3D space is reconstructed from a high-resolution stereo vision. The vein is detected by a simple but robust vein line d&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.12756v1-abstract-full').style.display = 'inline'; document.getElementById('2205.12756v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.12756v1-abstract-full" style="display: none;"> In this paper, we present a robotic device for mouse tail vein injection. We propose a mouse holding mechanism to realize vein injection without anesthetizing the mouse, which consists of a tourniquet, vacuum port, and adaptive tail-end fixture. The position of the target vein in 3D space is reconstructed from a high-resolution stereo vision. The vein is detected by a simple but robust vein line detector. Thanks to the proposed two-staged calibration process, the total time for the injection process is limited to 1.5 minutes, despite that the position of needle and tail vein varies for each trial. We performed an injection experiment targeting 40 mice and succeeded to inject saline to 37 of them, resulting 92.5% success ratio. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.12756v1-abstract-full').style.display = 'none'; document.getElementById('2205.12756v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted to ICRA2022 (7 pages, 11 figures, 2 tables)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.01714">arXiv:2112.01714</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.01714">pdf</a>, <a href="https://arxiv.org/format/2112.01714">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Structure-Aware Multi-Hop Graph Convolution for Graph Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.01714v1-abstract-short" style="display: inline;"> In this paper, we propose a spatial graph convolution (GC) to classify signals on a graph. Existing GC methods are limited to using the structural information in the feature space. Additionally, the single step of GCs only uses features on the one-hop neighboring nodes from the target node. In this paper, we propose two methods to improve the performance of GCs: 1) Utilizing structural information&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.01714v1-abstract-full').style.display = 'inline'; document.getElementById('2112.01714v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.01714v1-abstract-full" style="display: none;"> In this paper, we propose a spatial graph convolution (GC) to classify signals on a graph. Existing GC methods are limited to using the structural information in the feature space. Additionally, the single step of GCs only uses features on the one-hop neighboring nodes from the target node. In this paper, we propose two methods to improve the performance of GCs: 1) Utilizing structural information in the feature space, and 2) exploiting the multi-hop information in one GC step. In the first method, we define three structural features in the feature space: feature angle, feature distance, and relational embedding. The second method aggregates the node-wise features of multi-hop neighbors in a GC. Both methods can be simultaneously used. We also propose graph neural networks (GNNs) integrating the proposed GC for classifying nodes in 3D point clouds and citation networks. In experiments, the proposed GNNs exhibited a higher classification accuracy than existing methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.01714v1-abstract-full').style.display = 'none'; document.getElementById('2112.01714v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.00083">arXiv:2110.00083</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.00083">pdf</a>, <a href="https://arxiv.org/format/2110.00083">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> An Under-Actuated Whippletree Mechanism Gripper based on Multi-Objective Design Optimization with Auto-Tuned Weights </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Shirai%2C+Y">Yuki Shirai</a>, <a href="/search/cs?searchtype=author&amp;query=Lacey%2C+Z">Zachary Lacey</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+X">Xuan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jane Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+D">Dennis Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.00083v1-abstract-short" style="display: inline;"> Current rigid linkage grippers are limited in flexibility, and gripper design optimality relies on expertise, experiments, or arbitrary parameters. Our proposed rigid gripper can accommodate irregular and off-center objects through a whippletree mechanism, improving adaptability. We present a whippletree-based rigid under-actuated gripper and its parametric design multi-objective optimization for&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.00083v1-abstract-full').style.display = 'inline'; document.getElementById('2110.00083v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.00083v1-abstract-full" style="display: none;"> Current rigid linkage grippers are limited in flexibility, and gripper design optimality relies on expertise, experiments, or arbitrary parameters. Our proposed rigid gripper can accommodate irregular and off-center objects through a whippletree mechanism, improving adaptability. We present a whippletree-based rigid under-actuated gripper and its parametric design multi-objective optimization for a one-wall climbing task. Our proposed objective function considers kinematics and grasping forces simultaneously with a mathematical metric based on a model of an object environment. Our multi-objective problem is formulated as a single kinematic objective function with auto-tuning force-based weight. Our results indicate that our proposed objective function determines optimal parameters and kinematic ranges for our under-actuated gripper in the task environment with sufficient grasping forces. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.00083v1-abstract-full').style.display = 'none'; document.getElementById('2110.00083v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for IROS 2021</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 2110.00083 2021 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.15910">arXiv:2106.15910</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.15910">pdf</a>, <a href="https://arxiv.org/format/2106.15910">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TSP.2022.3180546">10.1109/TSP.2022.3180546 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Graph Signal Restoration Using Nested Deep Algorithm Unrolling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nagahama%2C+M">Masatoshi Nagahama</a>, <a href="/search/cs?searchtype=author&amp;query=Yamada%2C+K">Koki Yamada</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Chan%2C+S+H">Stanley H. Chan</a>, <a href="/search/cs?searchtype=author&amp;query=Eldar%2C+Y+C">Yonina C. Eldar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.15910v3-abstract-short" style="display: inline;"> Graph signal processing is a ubiquitous task in many applications such as sensor, social, transportation and brain networks, point cloud processing, and graph neural networks. Often, graph signals are corrupted in the sensing process, thus requiring restoration. In this paper, we propose two graph signal restoration methods based on deep algorithm unrolling (DAU). First, we present a graph signal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.15910v3-abstract-full').style.display = 'inline'; document.getElementById('2106.15910v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.15910v3-abstract-full" style="display: none;"> Graph signal processing is a ubiquitous task in many applications such as sensor, social, transportation and brain networks, point cloud processing, and graph neural networks. Often, graph signals are corrupted in the sensing process, thus requiring restoration. In this paper, we propose two graph signal restoration methods based on deep algorithm unrolling (DAU). First, we present a graph signal denoiser by unrolling iterations of the alternating direction method of multiplier (ADMM). We then suggest a general restoration method for linear degradation by unrolling iterations of Plug-and-Play ADMM (PnP-ADMM). In the second approach, the unrolled ADMM-based denoiser is incorporated as a submodule, leading to a nested DAU structure. The parameters in the proposed denoising/restoration methods are trainable in an end-to-end manner. Our approach is interpretable and keeps the number of parameters small since we only tune graph-independent regularization parameters. We overcome two main challenges in existing graph signal restoration methods: 1) limited performance of convex optimization algorithms due to fixed parameters which are often determined manually. 2) large number of parameters of graph neural networks that result in difficulty of training. Several experiments for graph signal denoising and interpolation are performed on synthetic and real-world data. The proposed methods show performance improvements over several existing techniques in terms of root mean squared error in both tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.15910v3-abstract-full').style.display = 'none'; document.getElementById('2106.15910v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.11152">arXiv:2105.11152</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2105.11152">pdf</a>, <a href="https://arxiv.org/format/2105.11152">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3447548.3467248">10.1145/3447548.3467248 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Dynamic Hawkes Processes for Discovering Time-evolving Communities&#39; States behind Diffusion Processes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Okawa%2C+M">Maya Okawa</a>, <a href="/search/cs?searchtype=author&amp;query=Iwata%2C+T">Tomoharu Iwata</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Toda%2C+H">Hiroyuki Toda</a>, <a href="/search/cs?searchtype=author&amp;query=Kurashima%2C+T">Takeshi Kurashima</a>, <a href="/search/cs?searchtype=author&amp;query=Kashima%2C+H">Hisashi Kashima</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.11152v2-abstract-short" style="display: inline;"> Sequences of events including infectious disease outbreaks, social network activities, and crimes are ubiquitous and the data on such events carry essential information about the underlying diffusion processes between communities (e.g., regions, online user groups). Modeling diffusion processes and predicting future events are crucial in many applications including epidemic control, viral marketin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.11152v2-abstract-full').style.display = 'inline'; document.getElementById('2105.11152v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.11152v2-abstract-full" style="display: none;"> Sequences of events including infectious disease outbreaks, social network activities, and crimes are ubiquitous and the data on such events carry essential information about the underlying diffusion processes between communities (e.g., regions, online user groups). Modeling diffusion processes and predicting future events are crucial in many applications including epidemic control, viral marketing, and predictive policing. Hawkes processes offer a central tool for modeling the diffusion processes, in which the influence from the past events is described by the triggering kernel. However, the triggering kernel parameters, which govern how each community is influenced by the past events, are assumed to be static over time. In the real world, the diffusion processes depend not only on the influences from the past, but also the current (time-evolving) states of the communities, e.g., people&#39;s awareness of the disease and people&#39;s current interests. In this paper, we propose a novel Hawkes process model that is able to capture the underlying dynamics of community states behind the diffusion processes and predict the occurrences of events based on the dynamics. Specifically, we model the latent dynamic function that encodes these hidden dynamics by a mixture of neural networks. Then we design the triggering kernel using the latent dynamic function and its integral. The proposed method, termed DHP (Dynamic Hawkes Processes), offers a flexible way to learn complex representations of the time-evolving communities&#39; states, while at the same time it allows to computing the exact likelihood, which makes parameter learning tractable. Extensive experiments on four real-world event datasets show that DHP outperforms five widely adopted methods for event prediction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.11152v2-abstract-full').style.display = 'none'; document.getElementById('2105.11152v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD &#39;21)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> G.3; J.4 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.03871">arXiv:2104.03871</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2104.03871">pdf</a>, <a href="https://arxiv.org/format/2104.03871">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Molecular Networks">q-bio.MN</span> </div> </div> <p class="title is-5 mathjax"> Complex network prediction using deep learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yoshihisa Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Kojima%2C+R">Ryosuke Kojima</a>, <a href="/search/cs?searchtype=author&amp;query=Ishida%2C+S">Shoichi Ishida</a>, <a href="/search/cs?searchtype=author&amp;query=Yamashita%2C+F">Fumiyoshi Yamashita</a>, <a href="/search/cs?searchtype=author&amp;query=Okuno%2C+Y">Yasushi Okuno</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2104.03871v1-abstract-short" style="display: inline;"> Systematic relations between multiple objects that occur in various fields can be represented as networks. Real-world networks typically exhibit complex topologies whose structural properties are key factors in characterizing and further exploring the networks themselves. Uncertainty, modelling procedures and measurement difficulties raise often insurmountable challenges in fully characterizing mo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.03871v1-abstract-full').style.display = 'inline'; document.getElementById('2104.03871v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2104.03871v1-abstract-full" style="display: none;"> Systematic relations between multiple objects that occur in various fields can be represented as networks. Real-world networks typically exhibit complex topologies whose structural properties are key factors in characterizing and further exploring the networks themselves. Uncertainty, modelling procedures and measurement difficulties raise often insurmountable challenges in fully characterizing most of the known real-world networks; hence, the necessity to predict their unknown elements from the limited data currently available in order to estimate possible future relations and/or to unveil unmeasurable relations. In this work, we propose a deep learning approach to this problem based on Graph Convolutional Networks for predicting networks while preserving their original structural properties. The study reveals that this method can preserve scale-free and small-world properties of complex networks when predicting their unknown parts, a feature lacked by the up-to-date conventional methods. An external validation realized by testing the approach on biological networks confirms the results, initially obtained on artificial data. Moreover, this process provides new insights into the retainability of network structure properties in network prediction. We anticipate that our work could inspire similar approaches in other research fields as well, where unknown mechanisms behind complex systems need to be revealed by combining machine-based and experiment-based methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.03871v1-abstract-full').style.display = 'none'; document.getElementById('2104.03871v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 16 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.14249">arXiv:2103.14249</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2103.14249">pdf</a>, <a href="https://arxiv.org/format/2103.14249">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/APSIPAASC58517.2023.10317279">10.1109/APSIPAASC58517.2023.10317279 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Marine Snow Removal Benchmarking Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kaneko%2C+R">Reina Kaneko</a>, <a href="/search/cs?searchtype=author&amp;query=Sato%2C+Y">Yuya Sato</a>, <a href="/search/cs?searchtype=author&amp;query=Ueda%2C+T">Takumi Ueda</a>, <a href="/search/cs?searchtype=author&amp;query=Higashi%2C+H">Hiroshi Higashi</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.14249v3-abstract-short" style="display: inline;"> This paper introduces a new benchmarking dataset for marine snow removal of underwater images. Marine snow is one of the main degradation sources of underwater images that are caused by small particles, e.g., organic matter and sand, between the underwater scene and photosensors. We mathematically model two typical types of marine snow from the observations of real underwater images. The modeled a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.14249v3-abstract-full').style.display = 'inline'; document.getElementById('2103.14249v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.14249v3-abstract-full" style="display: none;"> This paper introduces a new benchmarking dataset for marine snow removal of underwater images. Marine snow is one of the main degradation sources of underwater images that are caused by small particles, e.g., organic matter and sand, between the underwater scene and photosensors. We mathematically model two typical types of marine snow from the observations of real underwater images. The modeled artifacts are synthesized with underwater images to construct large-scale pairs of ground truth and degraded images to calculate objective qualities for marine snow removal and to train a deep neural network. We propose two marine snow removal tasks using the dataset and show the first benchmarking results of marine snow removal. The Marine Snow Removal Benchmarking Dataset is publicly available online. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.14249v3-abstract-full').style.display = 'none'; document.getElementById('2103.14249v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">APSIPA ASC 2023, Taipei, Taiwan, Nov. 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.10120">arXiv:2012.10120</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.10120">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Technical Progress Analysis Using a Dynamic Topic Model for Technical Terms to Revise Patent Classification Codes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iwata%2C+M">Mana Iwata</a>, <a href="/search/cs?searchtype=author&amp;query=Matsuda%2C+Y">Yoshiro Matsuda</a>, <a href="/search/cs?searchtype=author&amp;query=Utsumi%2C+Y">Yoshimasa Utsumi</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yoshitoshi Tanaka</a>, <a href="/search/cs?searchtype=author&amp;query=Nakata%2C+K">Kazuhide Nakata</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.10120v1-abstract-short" style="display: inline;"> Japanese patents are assigned a patent classification code, FI (File Index), that is unique to Japan. FI is a subdivision of the IPC, an international patent classification code, that is related to Japanese technology. FIs are revised to keep up with technological developments. These revisions have already established more than 30,000 new FIs since 2006. However, these revisions require a lot of t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.10120v1-abstract-full').style.display = 'inline'; document.getElementById('2012.10120v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.10120v1-abstract-full" style="display: none;"> Japanese patents are assigned a patent classification code, FI (File Index), that is unique to Japan. FI is a subdivision of the IPC, an international patent classification code, that is related to Japanese technology. FIs are revised to keep up with technological developments. These revisions have already established more than 30,000 new FIs since 2006. However, these revisions require a lot of time and workload. Moreover, these revisions are not automated and are thus inefficient. Therefore, using machine learning to assist in the revision of patent classification codes (FI) will lead to improved accuracy and efficiency. This study analyzes patent documents from this new perspective of assisting in the revision of patent classification codes with machine learning. To analyze time-series changes in patents, we used the dynamic topic model (DTM), which is an extension of the latent Dirichlet allocation (LDA). Also, unlike English, the Japanese language requires morphological analysis. Patents contain many technical words that are not used in everyday life, so morphological analysis using a common dictionary is not sufficient. Therefore, we used a technique for extracting technical terms from text. After extracting technical terms, we applied them to DTM. In this study, we determined the technological progress of the lighting class F21 for 14 years and compared it with the actual revision of patent classification codes. In other words, we extracted technical terms from Japanese patents and applied DTM to determine the progress of Japanese technology. Then, we analyzed the results from the new perspective of revising patent classification codes with machine learning. As a result, it was found that those whose topics were on the rise were judged to be new technologies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.10120v1-abstract-full').style.display = 'none'; document.getElementById('2012.10120v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.14002">arXiv:2010.14002</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2010.14002">pdf</a>, <a href="https://arxiv.org/format/2010.14002">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Graph Blind Deconvolution with Sparseness Constraint </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iwata%2C+K">Kazuma Iwata</a>, <a href="/search/cs?searchtype=author&amp;query=Yamada%2C+K">Koki Yamada</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yuichi Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.14002v1-abstract-short" style="display: inline;"> We propose a blind deconvolution method for signals on graphs, with the exact sparseness constraint for the original signal. Graph blind deconvolution is an algorithm for estimating the original signal on a graph from a set of blurred and noisy measurements. Imposing a constraint on the number of nonzero elements is desirable for many different applications. This paper deals with the problem with&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.14002v1-abstract-full').style.display = 'inline'; document.getElementById('2010.14002v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.14002v1-abstract-full" style="display: none;"> We propose a blind deconvolution method for signals on graphs, with the exact sparseness constraint for the original signal. Graph blind deconvolution is an algorithm for estimating the original signal on a graph from a set of blurred and noisy measurements. Imposing a constraint on the number of nonzero elements is desirable for many different applications. This paper deals with the problem with constraints placed on the exact number of original sources, which is given by an optimization problem with an $\ell_0$ norm constraint. We solve this non-convex optimization problem using the ADMM iterative solver. Numerical experiments using synthetic signals demonstrate the effectiveness of the proposed method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.14002v1-abstract-full').style.display = 'none'; document.getElementById('2010.14002v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.04360">arXiv:2010.04360</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2010.04360">pdf</a>, <a href="https://arxiv.org/format/2010.04360">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Few-shot Learning for Spatial Regression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iwata%2C+T">Tomoharu Iwata</a>, <a href="/search/cs?searchtype=author&amp;query=Tanaka%2C+Y">Yusuke Tanaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.04360v1-abstract-short" style="display: inline;"> We propose a few-shot learning method for spatial regression. Although Gaussian processes (GPs) have been successfully used for spatial regression, they require many observations in the target task to achieve a high predictive performance. Our model is trained using spatial datasets on various attributes in various regions, and predicts values on unseen attributes in unseen regions given a few obs&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.04360v1-abstract-full').style.display = 'inline'; document.getElementById('2010.04360v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.04360v1-abstract-full" style="display: none;"> We propose a few-shot learning method for spatial regression. Although Gaussian processes (GPs) have been successfully used for spatial regression, they require many observations in the target task to achieve a high predictive performance. Our model is trained using spatial datasets on various attributes in various regions, and predicts values on unseen attributes in unseen regions given a few observed data. With our model, a task representation is inferred from given small data using a neural network. Then, spatial values are predicted by neural networks with a GP framework, in which task-specific properties are controlled by the task representations. The GP framework allows us to analytically obtain predictions that are adapted to small data. By using the adapted predictions in the objective function, we can train our model efficiently and effectively so that the test predictive performance improves when adapted to newly given small data. In our experiments, we demonstrate that the proposed method achieves better predictive performance than existing meta-learning methods using spatial datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.04360v1-abstract-full').style.display = 'none'; document.getElementById('2010.04360v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Tanaka%2C+Y&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Tanaka%2C+Y&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Tanaka%2C+Y&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10