Search | arXiv e-print repository

Showing 1–47 of 47 results for author: Mehta, V

Searching in archive cs. Results are sorted by announcement date (newest first), 50 per page, with abstracts shown.
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.23642">arXiv:2410.23642</a> <span> [<a href="https://arxiv.org/pdf/2410.23642">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Novel Clinical-Grade Prostate Cancer Detection and Grading Model: Development and Prospective Validation Using Real World Data, with Performance Assessment on IHC Requested Cases </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Nateghi%2C+R">Ramin Nateghi</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+R">Ruoji Zhou</a>, <a href="/search/cs?searchtype=author&query=Saft%2C+M">Madeline Saft</a>, <a href="/search/cs?searchtype=author&query=Schnauss%2C+M">Marina Schnauss</a>, <a href="/search/cs?searchtype=author&query=Neill%2C+C">Clayton Neill</a>, <a href="/search/cs?searchtype=author&query=Alam%2C+R">Ridwan Alam</a>, <a href="/search/cs?searchtype=author&query=Handa%2C+N">Nicole Handa</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+M">Mitchell Huang</a>, <a href="/search/cs?searchtype=author&query=Li%2C+E+V">Eric V Li</a>, <a href="/search/cs?searchtype=author&query=Goldstein%2C+J+A">Jeffery A Goldstein</a>, <a href="/search/cs?searchtype=author&query=Schaeffer%2C+E+M">Edward M Schaeffer</a>, <a href="/search/cs?searchtype=author&query=Nadim%2C+M">Menatalla Nadim</a>, <a href="/search/cs?searchtype=author&query=Pourakpour%2C+F">Fattaneh Pourakpour</a>, <a href="/search/cs?searchtype=author&query=Isaila%2C+B">Bogdan Isaila</a>, <a href="/search/cs?searchtype=author&query=Felicelli%2C+C">Christopher Felicelli</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Vikas Mehta</a>, <a href="/search/cs?searchtype=author&query=Nezami%2C+B+G">Behtash G Nezami</a>, <a href="/search/cs?searchtype=author&query=Ross%2C+A">Ashley Ross</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+X">Ximing Yang</a>, <a href="/search/cs?searchtype=author&query=Cooper%2C+L+A">Lee AD Cooper</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.23642v1-abstract-short" style="display: inline;"> Artificial intelligence may assist healthcare systems in meeting increasing demand for pathology services while maintaining diagnostic quality and reducing turnaround time and costs. 
We aimed to investigate the performance of an institutionally developed system for prostate cancer detection, grading, and workflow optimization and to contrast this with commercial alternatives. From August 2021 to M… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23642v1-abstract-full').style.display = 'inline'; document.getElementById('2410.23642v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.23642v1-abstract-full" style="display: none;"> Artificial intelligence may assist healthcare systems in meeting increasing demand for pathology services while maintaining diagnostic quality and reducing turnaround time and costs. We aimed to investigate the performance of an institutionally developed system for prostate cancer detection, grading, and workflow optimization and to contrast this with commercial alternatives. From August 2021 to March 2023, we scanned 21,396 slides from 1,147 patients with positive biopsies. We developed models for cancer detection, grading, and screening of equivocal cases for IHC ordering. We compared a task-specific model trained using the PANDA dataset of prostate cancer biopsies with one built using features extracted by the general-purpose histology foundation model, UNI and compare their performance in an unfiltered prospectively collected dataset that reflects our patient population (1737 slides,95 patients). We evaluated the contributions of a bespoke model designed to improve sensitivity in detecting small cancer foci and scoring of broader patterns observed at lower resolution. We found high concordance between the developed systems and pathologist reference in detection (AUC 98.5, sensitivity 95.0, and specificity 97.8), ISUP grading (quadratic Cohen's kappa 0.869), grade group 3 or higher (AUC 97.5, sensitivity 94.9, specificity 96.6) and comparable to published data from commercial systems. Screening could reduce IHC ordering for equivocal cases by 44.5% with an overall error rate of 1.8% (1.4% false positive, 0.4% false negative rates). Institutions like academic medical centers that have high scanning volumes and report abstraction capabilities can develop accurate computational pathology models for internal use. These models have the potential to aid in quality control role and to improve workflow in the pathology lab to help meet future challenges in prostate cancer diagnosis. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23642v1-abstract-full').style.display = 'none'; document.getElementById('2410.23642v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 

2. arXiv:2409.19184 [eess.IV, cs.CV, cs.LG]
Title: Learning-Based Image Compression for Machines
Authors: Kartik Gupta, Kimberley Faria, Vikas Mehta
Abstract: While learning-based compression techniques for images have outperformed traditional methods, they have not been widely adopted in machine learning pipelines. This is largely due to a lack of standardization and a lack of retention of the salient features needed for such tasks. Decompression of images has taken a back seat in recent years, while the focus has shifted to an image's utility for performing machine-learning-based analysis on top of it. Thus, the demand for compression pipelines that incorporate such features from images has become ever-present. The methods outlined in this report build on recent work on learning-based image compression techniques to incorporate downstream tasks into them. We propose various methods of finetuning and enhancing different parts of a pretrained compression encoding pipeline and present the results of our investigation into the performance of vision tasks using compression-based pipelines.
Submitted: 27 September, 2024; originally announced September 2024.

3. arXiv:2409.08472 [eess.SY, cs.AI, cs.RO]
Title: An Intent Modeling and Inference Framework for Autonomous and Remotely Piloted Aerial Systems
Authors: Kesav Kaza, Varun Mehta, Hamid Azad, Miodrag Bolic, Iraj Mantegh
Abstract: An intent modelling and inference framework is presented to assist defense planning for protecting a geo-fence against unauthorized flights. First, a novel mathematical definition for the intent of an uncrewed aircraft system (UAS) is presented. The concepts of critical waypoints and critical waypoint patterns are introduced and associated with a motion process to fully characterize an intent. This modelling framework consists of representations of a UAS mission planner, used to plan the aircraft's motion sequence, as well as a defense planner, defined to protect the geo-fence. It is applicable to autonomous, semi-autonomous, and piloted systems in 2D and 3D environments with obstacles. The framework is illustrated by defining a library of intents for a security application. Detection and tracking of the target are presumed for formulating the intent inference problem. Multiple formulations of the decision maker's objective are discussed as part of a deep-learning-based methodology. Further, a multi-modal dynamic model for characterizing the UAS flight is discussed. This is later utilized to extract features using the interacting multiple model (IMM) filter for training the intent classifier. Finally, as part of the simulation study, an attention-based bi-directional long short-term memory (Bi-LSTM) network for intent inference is presented. The simulation experiments illustrate various aspects of the framework, including trajectory generation and radar measurement simulation, in 2D and 3D environments.
Submitted: 12 September, 2024; originally announced September 2024.
Comments: 8 pages, 7 figures, 3 tables

4. arXiv:2405.20304 [cs.CL, cs.LG]
Title: Group Robust Preference Optimization in Reward-free RLHF
Authors: Shyam Sundhar Ramesh, Yifan Hu, Iason Chaimalas, Viraj Mehta, Pier Giuseppe Sessa, Haitham Bou Ammar, Ilija Bogunovic
Abstract: Adapting large language models (LLMs) for specific tasks usually involves fine-tuning through reinforcement learning with human feedback (RLHF) on preference data. While these data often come from diverse groups of labelers (e.g., different demographics, ethnicities, company teams, etc.), traditional RLHF approaches adopt a "one-size-fits-all" approach, i.e., they indiscriminately assume and optimize a single preference model, and are thus not robust to the unique characteristics and needs of the various groups. To address this limitation, we propose a novel Group Robust Preference Optimization (GRPO) method to align LLMs to individual groups' preferences robustly. Our approach builds upon reward-free direct preference optimization methods, but unlike previous approaches, it seeks a robust policy which maximizes the worst-case group performance. To achieve this, GRPO adaptively and sequentially weights the importance of different groups, prioritizing groups with worse cumulative loss. We theoretically study the feasibility of GRPO and analyze its convergence for the log-linear policy class. By fine-tuning LLMs with GRPO using diverse group-based global opinion data, we significantly improved performance for the worst-performing groups, reduced loss imbalances across groups, and improved probability accuracies compared to non-robust baselines.
Submitted: 30 May, 2024; originally announced May 2024.
Comments: Preprint

5. arXiv:2404.12241 [cs.CL, cs.AI]
Title: Introducing v0.5 of the AI Safety Benchmark from MLCommons
Authors: Bertie Vidgen, Adarsh Agrawal, Ahmed M. Ahmed, Victor Akinwande, Namir Al-Nuaimi, Najla Alfaraj, Elie Alhajjar, Lora Aroyo, Trupti Bavalatti, Max Bartolo, Borhane Blili-Hamelin, Kurt Bollacker, Rishi Bomassani, Marisa Ferrara Boston, Siméon Campos, Kal Chakra, Canyu Chen, Cody Coleman, Zacharie Delpierre Coudert, Leon Derczynski, Debojyoti Dutta, Ian Eisenberg, James Ezick, Heather Frase, Brian Fuller, et al. (75 additional authors not shown)
Abstract: This paper introduces v0.5 of the AI Safety Benchmark, which has been created by the MLCommons AI Safety Working Group. The AI Safety Benchmark has been designed to assess the safety risks of AI systems that use chat-tuned language models. We introduce a principled approach to specifying and constructing the benchmark, which for v0.5 covers only a single use case (an adult chatting to a general-purpose assistant in English) and a limited set of personas (i.e., typical users, malicious users, and vulnerable users). We created a new taxonomy of 13 hazard categories, of which 7 have tests in the v0.5 benchmark. We plan to release version 1.0 of the AI Safety Benchmark by the end of 2024. The v1.0 benchmark will provide meaningful insights into the safety of AI systems. However, the v0.5 benchmark should not be used to assess the safety of AI systems. We have sought to fully document the limitations, flaws, and challenges of v0.5. This release of v0.5 of the AI Safety Benchmark includes (1) a principled approach to specifying and constructing the benchmark, which comprises use cases, types of systems under test (SUTs), language and context, personas, tests, and test items; (2) a taxonomy of 13 hazard categories with definitions and subcategories; (3) tests for seven of the hazard categories, each comprising a unique set of test items, i.e., prompts (there are 43,090 test items in total, which we created with templates); (4) a grading system for AI systems against the benchmark; (5) an openly available platform and downloadable tool, called ModelBench, that can be used to evaluate the safety of AI systems on the benchmark; (6) an example evaluation report which benchmarks the performance of over a dozen openly available chat-tuned language models; (7) a test specification for the benchmark.
Submitted: 13 May, 2024; v1 submitted 18 April, 2024; originally announced April 2024.

6. arXiv:2404.00399 [cs.CL, cs.AI, cs.LG]
Title: Aurora-M: The First Open Source Multilingual Language Model Red-teamed according to the U.S. Executive Order
Authors: Taishi Nakamura, Mayank Mishra, Simone Tedeschi, Yekun Chai, Jason T Stillerman, Felix Friedrich, Prateek Yadav, Tanmay Laud, Vu Minh Chien, Terry Yue Zhuo, Diganta Misra, Ben Bogin, Xuan-Son Vu, Marzena Karpinska, Arnav Varma Dantuluri, Wojciech Kusa, Tommaso Furlanello, Rio Yokota, Niklas Muennighoff, Suhas Pai, Tosin Adewumi, Veronika Laippala, Xiaozhe Yao, Adalberto Junior, Alpay Ariyak, et al. (20 additional authors not shown)
Abstract: Pretrained language models underpin several AI applications, but their high computational cost for training limits accessibility. Initiatives such as BLOOM and StarCoder aim to democratize access to pretrained models for collaborative community development. However, existing models face challenges: limited multilingual capabilities, catastrophic forgetting caused by continual pretraining, the high computational cost of pretraining from scratch, and compliance with AI safety and development laws. This paper presents Aurora-M, a 15B parameter multilingual open-source model trained on English, Finnish, Hindi, Japanese, Vietnamese, and code. Continually pretrained from StarCoderPlus on 435 billion additional tokens, Aurora-M surpasses 2 trillion tokens in total training token count. It is the first open-source multilingual model fine-tuned on human-reviewed safety instructions, thus aligning its development not only with conventional red-teaming considerations, but also with the specific concerns articulated in the Biden-Harris Executive Order on the Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence. Aurora-M is rigorously evaluated across various tasks and languages, demonstrating robustness against catastrophic forgetting and outperforming alternatives in multilingual settings, particularly in safety evaluations. To promote responsible open-source LLM development, Aurora-M and its variants are released at https://huggingface.co/collections/aurora-m/aurora-m-models-65fdfdff62471e09812f5407 .
Submitted: 23 April, 2024; v1 submitted 30 March, 2024; originally announced April 2024.
Comments: Preprint

7. arXiv:2312.00267 [cs.LG, cs.AI, stat.ML]
Title: Sample Efficient Reinforcement Learning from Human Feedback via Active Exploration
Authors: Viraj Mehta, Vikramjeet Das, Ojash Neopane, Yijia Dai, Ilija Bogunovic, Jeff Schneider, Willie Neiswanger
Abstract: Preference-based feedback is important for many applications in reinforcement learning where direct evaluation of a reward function is not feasible. A notable recent example arises in reinforcement learning from human feedback (RLHF) on large language models. For many applications of RLHF, the cost of acquiring the human feedback can be substantial. In this work, we take advantage of the fact that one can often choose contexts at which to obtain human feedback in order to most efficiently identify a good policy, and formalize this as an offline contextual dueling bandit problem. We give an upper-confidence-bound style algorithm for this problem and prove a polynomial worst-case regret bound. We then provide empirical confirmation in a synthetic setting that our approach outperforms existing methods. Finally, we extend the setting and methodology for practical use in RLHF training of large language models. Here, our method is able to reach better performance with fewer samples of human preferences than multiple baselines on three real-world datasets.
Submitted: 30 November, 2023; originally announced December 2023.

8. arXiv:2310.05674 [cs.LG, cs.AI]
Title: Making Scalable Meta Learning Practical
Authors: Sang Keun Choe, Sanket Vaibhav Mehta, Hwijeen Ahn, Willie Neiswanger, Pengtao Xie, Emma Strubell, Eric Xing
Abstract: Despite its flexibility to learn diverse inductive biases in machine learning programs, meta learning (i.e., learning to learn) has long been recognized to suffer from poor scalability due to its tremendous compute/memory costs, training instability, and a lack of efficient distributed training support. In this work, we focus on making scalable meta learning practical by introducing SAMA, which combines advances in both implicit differentiation algorithms and systems. Specifically, SAMA is designed to flexibly support a broad range of adaptive optimizers in the base level of meta learning programs, while reducing computational burden by avoiding explicit computation of second-order gradient information and exploiting efficient distributed training techniques implemented for first-order gradients. Evaluated on multiple large-scale meta learning benchmarks, SAMA showcases up to a 1.7/4.8x increase in throughput and a 2.0/3.8x decrease in memory consumption on single-/multi-GPU setups, respectively, compared to other baseline meta learning algorithms. Furthermore, we show that SAMA-based data optimization leads to consistent improvements in text classification accuracy with BERT and RoBERTa large language models, and achieves state-of-the-art results in both small- and large-scale data pruning on image classification tasks, demonstrating the practical applicability of scalable meta learning across language and vision domains.
Submitted: 23 October, 2023; v1 submitted 9 October, 2023; originally announced October 2023.

9. arXiv:2308.15141 [eess.IV, cs.CV, cs.LG]
DOI: 10.1016/j.media.2023.102861
Title: Uncertainty Aware Training to Improve Deep Learning Model Calibration for Classification of Cardiac MR Images
Authors: Tareen Dawood, Chen Chen, Baldeep S. Sidhua, Bram Ruijsink, Justin Goulda, Bradley Porter, Mark K. Elliott, Vishal Mehta, Christopher A. Rinaldi, Esther Puyol-Anton, Reza Razavi, Andrew P. King
Abstract: Quantifying uncertainty of predictions has been identified as one way to develop more trustworthy artificial intelligence (AI) models beyond conventional reporting of performance metrics. When considering their role in a clinical decision support setting, AI classification models should ideally avoid confident wrong predictions and maximise the confidence of correct predictions. Models that do this are said to be well-calibrated with regard to confidence. However, relatively little attention has been paid to how to improve calibration when training these models, i.e., to make the training strategy uncertainty-aware. In this work we evaluate three novel uncertainty-aware training strategies, comparing against two state-of-the-art approaches. We analyse performance on two different clinical applications: cardiac resynchronisation therapy (CRT) response prediction and coronary artery disease (CAD) diagnosis from cardiac magnetic resonance (CMR) images. The best-performing model in terms of both classification accuracy and the most common calibration measure, expected calibration error (ECE), was the Confidence Weight method, a novel approach that weights the loss of samples to explicitly penalise confident incorrect predictions. The method reduced the ECE by 17% for CRT response prediction and by 22% for CAD diagnosis when compared to a baseline classifier in which no uncertainty-aware strategy was included. In both applications, as well as reducing the ECE, there was a slight increase in accuracy, from 69% to 70% and from 70% to 72% for CRT response prediction and CAD diagnosis respectively. However, our analysis showed a lack of consistency in terms of optimal models when using different calibration measures. This indicates the need for careful consideration of performance metrics when training and selecting models for complex high-risk applications in healthcare.
Submitted: 29 August, 2023; originally announced August 2023.

10. arXiv:2307.11288 [cs.LG, cs.AI, stat.ML]
Title: Kernelized Offline Contextual Dueling Bandits
Authors: Viraj Mehta, Ojash Neopane, Vikramjeet Das, Sen Lin, Jeff Schneider, Willie Neiswanger
Abstract: Preference-based feedback is important for many applications where direct evaluation of a reward function is not feasible. A notable recent example arises in reinforcement learning from human feedback on large language models. For many of these applications, the cost of acquiring the human feedback can be substantial or even prohibitive. In this work, we take advantage of the fact that often the agent can choose contexts at which to obtain human feedback in order to most efficiently identify a good policy, and introduce the offline contextual dueling bandit setting. We give an upper-confidence-bound style algorithm for this setting and prove a regret bound. We also give empirical confirmation that this method outperforms a similar strategy that uses uniformly sampled contexts.
Submitted: 20 July, 2023; originally announced July 2023.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.05376">arXiv:2306.05376</a> <span> [<a href="https://arxiv.org/pdf/2306.05376">pdf</a>, <a href="https://arxiv.org/format/2306.05376">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Anomaly Detection in Satellite Videos using Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Awasthi%2C+A">Akash Awasthi</a>, <a href="/search/cs?searchtype=author&query=Ly%2C+S">Son Ly</a>, <a href="/search/cs?searchtype=author&query=Nizam%2C+J">Jaer Nizam</a>, <a href="/search/cs?searchtype=author&query=Zare%2C+S">Samira Zare</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Videet Mehta</a>, <a href="/search/cs?searchtype=author&query=Ahmed%2C+S">Safwan Ahmed</a>, <a href="/search/cs?searchtype=author&query=Shah%2C+K">Keshav Shah</a>, <a href="/search/cs?searchtype=author&query=Nemani%2C+R">Ramakrishna Nemani</a>, <a href="/search/cs?searchtype=author&query=Prasad%2C+S">Saurabh Prasad</a>, <a href="/search/cs?searchtype=author&query=Van+Nguyen%2C+H">Hien Van Nguyen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.05376v1-abstract-short" style="display: inline;"> The definition of anomaly detection is the identification of an unexpected event. Real-time detection of extreme events such as wildfires, cyclones, or floods using satellite data has become crucial for disaster management. Although several earth-observing satellites provide information about disasters, satellites in the geostationary orbit provide data at intervals as frequent as every minute, ef… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.05376v1-abstract-full').style.display = 'inline'; document.getElementById('2306.05376v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.05376v1-abstract-full" style="display: none;"> The definition of anomaly detection is the identification of an unexpected event. Real-time detection of extreme events such as wildfires, cyclones, or floods using satellite data has become crucial for disaster management. Although several earth-observing satellites provide information about disasters, satellites in the geostationary orbit provide data at intervals as frequent as every minute, effectively creating a video from space. There are many techniques that have been proposed to identify anomalies in surveillance videos; however, the available datasets do not have dynamic behavior, so we discuss an anomaly framework that can work on very high-frequency datasets to find very fast-moving anomalies. In this work, we present a diffusion model which does not need any motion component to capture the fast-moving anomalies and outperforms the other baseline methods. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.05376v1-abstract-full').style.display = 'none'; document.getElementById('2306.05376v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.00131">arXiv:2305.00131</a> <span> [<a href="https://arxiv.org/pdf/2305.00131">pdf</a>, <a href="https://arxiv.org/format/2305.00131">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Regularizing Self-training for Unsupervised Domain Adaptation via Structural Constraints </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Das%2C+R">Rajshekhar Das</a>, <a href="/search/cs?searchtype=author&query=Francis%2C+J">Jonathan Francis</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+S+V">Sanket Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=Oh%2C+J">Jean Oh</a>, <a href="/search/cs?searchtype=author&query=Strubell%2C+E">Emma Strubell</a>, <a href="/search/cs?searchtype=author&query=Moura%2C+J">Jose Moura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.00131v1-abstract-short" style="display: inline;"> Self-training based on pseudo-labels has emerged as a dominant approach for addressing conditional distribution shifts in unsupervised domain adaptation (UDA) for semantic segmentation problems. A notable drawback, however, is that this family of approaches is susceptible to erroneous pseudo labels that arise from confirmation biases in the source domain and that manifest as nuisance factors in th… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.00131v1-abstract-full').style.display = 'inline'; document.getElementById('2305.00131v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.00131v1-abstract-full" style="display: none;"> Self-training based on pseudo-labels has emerged as a dominant approach for addressing conditional distribution shifts in unsupervised domain adaptation (UDA) for semantic segmentation problems. A notable drawback, however, is that this family of approaches is susceptible to erroneous pseudo labels that arise from confirmation biases in the source domain and that manifest as nuisance factors in the target domain. A possible source for this mismatch is the reliance on only photometric cues provided by RGB image inputs, which may ultimately lead to sub-optimal adaptation. To mitigate the effect of mismatched pseudo-labels, we propose to incorporate structural cues from auxiliary modalities, such as depth, to regularise conventional self-training objectives. Specifically, we introduce a contrastive pixel-level objectness constraint that pulls the pixel representations within a region of an object instance closer, while pushing those from different object categories apart. 
To obtain object regions consistent with the true underlying object, we extract information from both depth maps and RGB-images in the form of multimodal clustering. Crucially, the objectness constraint is agnostic to the ground-truth semantic labels and, hence, appropriate for unsupervised domain adaptation. In this work, we show that our regularizer significantly improves top performing self-training methods (by up to $2$ points) in various UDA benchmarks for semantic segmentation. We include all code in the supplementary. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.00131v1-abstract-full').style.display = 'none'; document.getElementById('2305.00131v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.08486">arXiv:2304.08486</a> <span> [<a href="https://arxiv.org/pdf/2304.08486">pdf</a>, <a href="https://arxiv.org/format/2304.08486">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> BenchMD: A Benchmark for Unified Learning on Medical Images and Sensors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wantlin%2C+K">Kathryn Wantlin</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+C">Chenwei Wu</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+S">Shih-Cheng Huang</a>, <a href="/search/cs?searchtype=author&query=Banerjee%2C+O">Oishi Banerjee</a>, <a href="/search/cs?searchtype=author&query=Dadabhoy%2C+F">Farah Dadabhoy</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V+V">Veeral Vipin Mehta</a>, <a href="/search/cs?searchtype=author&query=Han%2C+R+W">Ryan Wonhee Han</a>, <a href="/search/cs?searchtype=author&query=Cao%2C+F">Fang Cao</a>, <a href="/search/cs?searchtype=author&query=Narayan%2C+R+R">Raja R. Narayan</a>, <a href="/search/cs?searchtype=author&query=Colak%2C+E">Errol Colak</a>, <a href="/search/cs?searchtype=author&query=Adamson%2C+A">Adewole Adamson</a>, <a href="/search/cs?searchtype=author&query=Heacock%2C+L">Laura Heacock</a>, <a href="/search/cs?searchtype=author&query=Tison%2C+G+H">Geoffrey H. Tison</a>, <a href="/search/cs?searchtype=author&query=Tamkin%2C+A">Alex Tamkin</a>, <a href="/search/cs?searchtype=author&query=Rajpurkar%2C+P">Pranav Rajpurkar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.08486v2-abstract-short" style="display: inline;"> Medical data poses a daunting challenge for AI algorithms: it exists in many different modalities, experiences frequent distribution shifts, and suffers from a scarcity of examples and labels. Recent advances, including transformers and self-supervised learning, promise a more universal approach that can be applied flexibly across these diverse conditions. 
To measure and drive progress in this dir… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.08486v2-abstract-full').style.display = 'inline'; document.getElementById('2304.08486v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.08486v2-abstract-full" style="display: none;"> Medical data poses a daunting challenge for AI algorithms: it exists in many different modalities, experiences frequent distribution shifts, and suffers from a scarcity of examples and labels. Recent advances, including transformers and self-supervised learning, promise a more universal approach that can be applied flexibly across these diverse conditions. To measure and drive progress in this direction, we present BenchMD: a benchmark that tests how well unified, modality-agnostic methods, including architectures and training techniques (e.g. self-supervised learning, ImageNet pretraining),perform on a diverse array of clinically-relevant medical tasks. BenchMD combines 19 publicly available datasets for 7 medical modalities, including 1D sensor data, 2D images, and 3D volumetric scans. Our benchmark reflects real-world data constraints by evaluating methods across a range of dataset sizes, including challenging few-shot settings that incentivize the use of pretraining. Finally, we evaluate performance on out-of-distribution data collected at different hospitals than the training data, representing naturally-occurring distribution shifts that frequently degrade the performance of medical AI models. Our baseline results demonstrate that no unified learning technique achieves strong performance across all modalities, leaving ample room for improvement on the benchmark. Code is released at https://github.com/rajpurkarlab/BenchMD. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.08486v2-abstract-full').style.display = 'none'; document.getElementById('2304.08486v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.09744">arXiv:2212.09744</a> <span> [<a href="https://arxiv.org/pdf/2212.09744">pdf</a>, <a href="https://arxiv.org/format/2212.09744">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> DSI++: Updating Transformer Memory with New Documents </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+S+V">Sanket Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=Gupta%2C+J">Jai Gupta</a>, <a href="/search/cs?searchtype=author&query=Tay%2C+Y">Yi Tay</a>, <a href="/search/cs?searchtype=author&query=Dehghani%2C+M">Mostafa Dehghani</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+V+Q">Vinh Q. Tran</a>, <a href="/search/cs?searchtype=author&query=Rao%2C+J">Jinfeng Rao</a>, <a href="/search/cs?searchtype=author&query=Najork%2C+M">Marc Najork</a>, <a href="/search/cs?searchtype=author&query=Strubell%2C+E">Emma Strubell</a>, <a href="/search/cs?searchtype=author&query=Metzler%2C+D">Donald Metzler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.09744v3-abstract-short" style="display: inline;"> Differentiable Search Indices (DSIs) encode a corpus of documents in model parameters and use the same model to answer user queries directly. Despite the strong performance of DSI models, deploying them in situations where the corpus changes over time is computationally expensive because reindexing the corpus requires re-training the model. In this work, we introduce DSI++, a continual learning ch… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.09744v3-abstract-full').style.display = 'inline'; document.getElementById('2212.09744v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.09744v3-abstract-full" style="display: none;"> Differentiable Search Indices (DSIs) encode a corpus of documents in model parameters and use the same model to answer user queries directly. Despite the strong performance of DSI models, deploying them in situations where the corpus changes over time is computationally expensive because reindexing the corpus requires re-training the model. In this work, we introduce DSI++, a continual learning challenge for DSI to incrementally index new documents while being able to answer queries related to both previously and newly indexed documents. Across different model scales and document identifier representations, we show that continual indexing of new documents leads to considerable forgetting of previously indexed documents. We also hypothesize and verify that the model experiences forgetting events during training, leading to unstable learning. To mitigate these issues, we investigate two approaches. The first focuses on modifying the training dynamics. 
Flatter minima implicitly alleviate forgetting, so we optimize for flatter loss basins and show that the model stably memorizes more documents ($+12\%$). Next, we introduce a generative memory to sample pseudo-queries for documents and supplement them during continual indexing to prevent forgetting for the retrieval task. Extensive experiments on novel continual indexing benchmarks based on Natural Questions (NQ) and MS MARCO demonstrate that our proposed solution mitigates forgetting significantly. Concretely, it improves the average Hits@10 by $+21.1\%$ over competitive baselines for NQ and requires $6$ times fewer model updates compared to re-training the DSI model for incrementally indexing five corpora in a sequence. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.09744v3-abstract-full').style.display = 'none'; document.getElementById('2212.09744v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at EMNLP 2023 main conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.09510">arXiv:2212.09510</a> <span> [<a href="https://arxiv.org/pdf/2212.09510">pdf</a>, <a href="https://arxiv.org/format/2212.09510">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Near-optimal Policy Identification in Active Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+X">Xiang Li</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Viraj Mehta</a>, <a href="/search/cs?searchtype=author&query=Kirschner%2C+J">Johannes Kirschner</a>, <a href="/search/cs?searchtype=author&query=Char%2C+I">Ian Char</a>, <a href="/search/cs?searchtype=author&query=Neiswanger%2C+W">Willie Neiswanger</a>, <a href="/search/cs?searchtype=author&query=Schneider%2C+J">Jeff Schneider</a>, <a href="/search/cs?searchtype=author&query=Krause%2C+A">Andreas Krause</a>, <a href="/search/cs?searchtype=author&query=Bogunovic%2C+I">Ilija Bogunovic</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.09510v1-abstract-short" style="display: inline;"> Many real-world reinforcement learning tasks require control of complex dynamical systems that involve both costly data acquisition processes and large state spaces. In cases where the transition dynamics can be readily evaluated at specified states (e.g., via a simulator), agents can operate in what is often referred to as planning with a \emph{generative model}. 
We propose the AE-LSVI algorithm… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.09510v1-abstract-full').style.display = 'inline'; document.getElementById('2212.09510v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.09510v1-abstract-full" style="display: none;"> Many real-world reinforcement learning tasks require control of complex dynamical systems that involve both costly data acquisition processes and large state spaces. In cases where the transition dynamics can be readily evaluated at specified states (e.g., via a simulator), agents can operate in what is often referred to as planning with a \emph{generative model}. We propose the AE-LSVI algorithm for best-policy identification, a novel variant of the kernelized least-squares value iteration (LSVI) algorithm that combines optimism with pessimism for active exploration (AE). AE-LSVI provably identifies a near-optimal policy \emph{uniformly} over an entire state space and achieves polynomial sample complexity guarantees that are independent of the number of states. When specialized to the recently introduced offline contextual Bayesian optimization setting, our algorithm achieves improved sample complexity bounds. Experimentally, we demonstrate that AE-LSVI outperforms other RL algorithms in a variety of environments when robustness to the initial state is required. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.09510v1-abstract-full').style.display = 'none'; document.getElementById('2212.09510v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.02626">arXiv:2212.02626</a> <span> [<a href="https://arxiv.org/pdf/2212.02626">pdf</a>, <a href="https://arxiv.org/format/2212.02626">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3576915.3623105">10.1145/3576915.3623105 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A Generic Methodology for the Modular Verification of Security Protocol Implementations (extended version) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Arquint%2C+L">Linard Arquint</a>, <a href="/search/cs?searchtype=author&query=Schwerhoff%2C+M">Malte Schwerhoff</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=M%C3%BCller%2C+P">Peter M眉ller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.02626v2-abstract-short" style="display: inline;"> Security protocols are essential building blocks of modern IT systems. Subtle flaws in their design or implementation may compromise the security of entire systems. It is, thus, important to prove the absence of such flaws through formal verification. Much existing work focuses on the verification of protocol *models*, which is not sufficient to show that their *implementations* are actually secur… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.02626v2-abstract-full').style.display = 'inline'; document.getElementById('2212.02626v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.02626v2-abstract-full" style="display: none;"> Security protocols are essential building blocks of modern IT systems. Subtle flaws in their design or implementation may compromise the security of entire systems. It is, thus, important to prove the absence of such flaws through formal verification. Much existing work focuses on the verification of protocol *models*, which is not sufficient to show that their *implementations* are actually secure. Verification techniques for protocol implementations (e.g., via code generation or model extraction) typically impose severe restrictions on the used programming language and code design, which may lead to sub-optimal implementations. In this paper, we present a methodology for the modular verification of strong security properties directly on the level of the protocol implementations. Our methodology leverages state-of-the-art verification logics and tools to support a wide range of implementations and programming languages. 
We demonstrate its effectiveness by verifying memory safety and security of Go implementations of the Needham-Schroeder-Lowe, Diffie-Hellman key exchange, and WireGuard protocols, including forward secrecy and injective agreement for WireGuard. We also show that our methodology is agnostic to a particular language or program verifier with a prototype implementation for C. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.02626v2-abstract-full').style.display = 'none'; document.getElementById('2212.02626v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.04642">arXiv:2210.04642</a> <span> [<a href="https://arxiv.org/pdf/2210.04642">pdf</a>, <a href="https://arxiv.org/format/2210.04642">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Exploration via Planning for Information about the Optimal Trajectory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Viraj Mehta</a>, <a href="/search/cs?searchtype=author&query=Char%2C+I">Ian Char</a>, <a href="/search/cs?searchtype=author&query=Abbate%2C+J">Joseph Abbate</a>, <a href="/search/cs?searchtype=author&query=Conlin%2C+R">Rory Conlin</a>, <a href="/search/cs?searchtype=author&query=Boyer%2C+M+D">Mark D. Boyer</a>, <a href="/search/cs?searchtype=author&query=Ermon%2C+S">Stefano Ermon</a>, <a href="/search/cs?searchtype=author&query=Schneider%2C+J">Jeff Schneider</a>, <a href="/search/cs?searchtype=author&query=Neiswanger%2C+W">Willie Neiswanger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.04642v1-abstract-short" style="display: inline;"> Many potential applications of reinforcement learning (RL) are stymied by the large numbers of samples required to learn an effective policy. This is especially true when applying RL to real-world control tasks, e.g. in the sciences or robotics, where executing a policy in the environment is costly. 
In popular RL algorithms, agents typically explore either by adding stochasticity to a reward-maxim… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.04642v1-abstract-full').style.display = 'inline'; document.getElementById('2210.04642v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.04642v1-abstract-full" style="display: none;"> Many potential applications of reinforcement learning (RL) are stymied by the large numbers of samples required to learn an effective policy. This is especially true when applying RL to real-world control tasks, e.g. in the sciences or robotics, where executing a policy in the environment is costly. In popular RL algorithms, agents typically explore either by adding stochasticity to a reward-maximizing policy or by attempting to gather maximal information about environment dynamics without taking the given task into account. In this work, we develop a method that allows us to plan for exploration while taking both the task and the current knowledge about the dynamics into account. The key insight to our approach is to plan an action sequence that maximizes the expected information gain about the optimal trajectory for the task at hand. We demonstrate that our method learns strong policies with 2x fewer samples than strong exploration baselines and 200x fewer samples than model free methods on a diverse set of low-to-medium dimensional control tasks in both the open-loop and closed-loop control settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.04642v1-abstract-full').style.display = 'none'; document.getElementById('2210.04642v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Conference paper at Neurips 2022. Code available at https://github.com/fusion-ml/trajectory-information-rl. 
arXiv admin note: text overlap with arXiv:2112.05244</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.04354">arXiv:2207.04354</a> <span> [<a href="https://arxiv.org/pdf/2207.04354">pdf</a>, <a href="https://arxiv.org/format/2207.04354">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> An Introduction to Lifelong Supervised Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sodhani%2C+S">Shagun Sodhani</a>, <a href="/search/cs?searchtype=author&query=Faramarzi%2C+M">Mojtaba Faramarzi</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+S+V">Sanket Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=Malviya%2C+P">Pranshu Malviya</a>, <a href="/search/cs?searchtype=author&query=Abdelsalam%2C+M">Mohamed Abdelsalam</a>, <a href="/search/cs?searchtype=author&query=Janarthanan%2C+J">Janarthanan Janarthanan</a>, <a href="/search/cs?searchtype=author&query=Chandar%2C+S">Sarath Chandar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.04354v2-abstract-short" style="display: inline;"> This primer is an attempt to provide a detailed summary of the different facets of lifelong learning. We start with Chapter 2 which provides a high-level overview of lifelong learning systems. In this chapter, we discuss prominent scenarios in lifelong learning (Section 2.4), provide 8 Introduction a high-level organization of different lifelong learning approaches (Section 2.5), enumerate the des… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.04354v2-abstract-full').style.display = 'inline'; document.getElementById('2207.04354v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.04354v2-abstract-full" style="display: none;"> This primer is an attempt to provide a detailed summary of the different facets of lifelong learning. We start with Chapter 2 which provides a high-level overview of lifelong learning systems. In this chapter, we discuss prominent scenarios in lifelong learning (Section 2.4), provide 8 Introduction a high-level organization of different lifelong learning approaches (Section 2.5), enumerate the desiderata for an ideal lifelong learning system (Section 2.6), discuss how lifelong learning is related to other learning paradigms (Section 2.7), describe common metrics used to evaluate lifelong learning systems (Section 2.8). This chapter is more useful for readers who are new to lifelong learning and want to get introduced to the field without focusing on specific approaches or benchmarks. The remaining chapters focus on specific aspects (either learning algorithms or benchmarks) and are more useful for readers who are looking for specific approaches or benchmarks. Chapter 3 focuses on regularization-based approaches that do not assume access to any data from previous tasks. Chapter 4 discusses memory-based approaches that typically use a replay buffer or an episodic memory to save subset of data across different tasks. 
Chapter 5 focuses on different architecture families (and their instantiations) that have been proposed for training lifelong learning systems. Following these different classes of learning algorithms, we discuss the commonly used evaluation benchmarks and metrics for lifelong learning (Chapter 6) and wrap up with a discussion of future challenges and important research directions in Chapter 7. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.04354v2-abstract-full').style.display = 'none'; document.getElementById('2207.04354v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Lifelong Learning Primer</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.12694">arXiv:2205.12694</a> <span> [<a href="https://arxiv.org/pdf/2205.12694">pdf</a>, <a href="https://arxiv.org/format/2205.12694">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.18653/v1/2022.findings-emnlp.361">10.18653/v1/2022.findings-emnlp.361 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Train Flat, Then Compress: Sharpness-Aware Minimization Learns More Compressible Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Na%2C+C">Clara Na</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+S+V">Sanket Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=Strubell%2C+E">Emma Strubell</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.12694v2-abstract-short" style="display: inline;"> Model compression by way of parameter pruning, quantization, or distillation has recently gained popularity as an approach for reducing the computational requirements of modern deep neural network models for NLP. 
Inspired by prior works suggesting a connection between simpler, more generalizable models and those that lie within wider loss basins, we hypothesize that optimizing for flat minima shou… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.12694v2-abstract-full').style.display = 'inline'; document.getElementById('2205.12694v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.12694v2-abstract-full" style="display: none;"> Model compression by way of parameter pruning, quantization, or distillation has recently gained popularity as an approach for reducing the computational requirements of modern deep neural network models for NLP. Inspired by prior works suggesting a connection between simpler, more generalizable models and those that lie within wider loss basins, we hypothesize that optimizing for flat minima should lead to simpler parameterizations and thus more compressible models. We propose to combine sharpness-aware minimization (SAM) with various task-specific model compression methods, including iterative magnitude pruning (IMP), structured pruning with a distillation objective, and post-training dynamic quantization. Empirically, we show that optimizing for flatter minima consistently leads to greater compressibility of parameters compared to vanilla Adam when fine-tuning BERT models, with little to no loss in accuracy on the GLUE text classification and SQuAD question answering benchmarks. Moreover, SAM finds superior winning tickets during IMP that 1) are amenable to vanilla Adam optimization, and 2) transfer more effectively across tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.12694v2-abstract-full').style.display = 'none'; document.getElementById('2205.12694v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">EMNLP 2022 Findings, 28 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.12026">arXiv:2204.12026</a> <span> [<a href="https://arxiv.org/pdf/2204.12026">pdf</a>, <a href="https://arxiv.org/format/2204.12026">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> BATS: Best Action Trajectory Stitching </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Char%2C+I">Ian Char</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Viraj Mehta</a>, <a href="/search/cs?searchtype=author&query=Villaflor%2C+A">Adam Villaflor</a>, <a href="/search/cs?searchtype=author&query=Dolan%2C+J+M">John M. 
Dolan</a>, <a href="/search/cs?searchtype=author&query=Schneider%2C+J">Jeff Schneider</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.12026v1-abstract-short" style="display: inline;"> The problem of offline reinforcement learning focuses on learning a good policy from a log of environment interactions. Past efforts for developing algorithms in this area have revolved around introducing constraints to online reinforcement learning algorithms to ensure the actions of the learned policy are constrained to the logged data. In this work, we explore an alternative approach by plannin… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.12026v1-abstract-full').style.display = 'inline'; document.getElementById('2204.12026v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.12026v1-abstract-full" style="display: none;"> The problem of offline reinforcement learning focuses on learning a good policy from a log of environment interactions. Past efforts for developing algorithms in this area have revolved around introducing constraints to online reinforcement learning algorithms to ensure the actions of the learned policy are constrained to the logged data. In this work, we explore an alternative approach by planning on the fixed dataset directly. Specifically, we introduce an algorithm which forms a tabular Markov Decision Process (MDP) over the logged data by adding new transitions to the dataset. We do this by using learned dynamics models to plan short trajectories between states. Since exact value iteration can be performed on this constructed MDP, it becomes easy to identify which trajectories are advantageous to add to the MDP. Crucially, since most transitions in this MDP come from the logged data, trajectories from the MDP can be rolled out for long periods with confidence. We prove that this property allows one to make upper and lower bounds on the value function up to appropriate distance metrics. Finally, we demonstrate empirically how algorithms that uniformly constrain the learned policy to the entire dataset can result in unwanted behavior, and we show an example in which simply behavior cloning the optimal policy of the MDP created by our algorithm avoids this problem. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.12026v1-abstract-full').style.display = 'none'; document.getElementById('2204.12026v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to NeurIPS Offline RL Workshop 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.11726">arXiv:2203.11726</a> <span> [<a href="https://arxiv.org/pdf/2203.11726">pdf</a>, <a href="https://arxiv.org/format/2203.11726">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> AI-enabled Assessment of Cardiac Systolic and Diastolic Function from Echocardiography </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Ant贸n</a>, <a href="/search/cs?searchtype=author&query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&query=Sidhu%2C+B+S">Baldeep S. Sidhu</a>, <a href="/search/cs?searchtype=author&query=Gould%2C+J">Justin Gould</a>, <a href="/search/cs?searchtype=author&query=Porter%2C+B">Bradley Porter</a>, <a href="/search/cs?searchtype=author&query=Elliott%2C+M+K">Mark K. Elliott</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Vishal Mehta</a>, <a href="/search/cs?searchtype=author&query=Gu%2C+H">Haotian Gu</a>, <a href="/search/cs?searchtype=author&query=Xochicale%2C+M">Miguel Xochicale</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&query=Rinaldi%2C+C+A">Christopher A. Rinaldi</a>, <a href="/search/cs?searchtype=author&query=Cowie%2C+M">Martin Cowie</a>, <a href="/search/cs?searchtype=author&query=Chowienczyk%2C+P">Phil Chowienczyk</a>, <a href="/search/cs?searchtype=author&query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.11726v2-abstract-short" style="display: inline;"> Left ventricular (LV) function is an important factor in terms of patient management, outcome, and long-term survival of patients with heart disease. The most recently published clinical guidelines for heart failure recognise that over reliance on only one measure of cardiac function (LV ejection fraction) as a diagnostic and treatment stratification biomarker is suboptimal. Recent advances in AI-… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.11726v2-abstract-full').style.display = 'inline'; document.getElementById('2203.11726v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.11726v2-abstract-full" style="display: none;"> Left ventricular (LV) function is an important factor in terms of patient management, outcome, and long-term survival of patients with heart disease. 
The most recently published clinical guidelines for heart failure recognise that over reliance on only one measure of cardiac function (LV ejection fraction) as a diagnostic and treatment stratification biomarker is suboptimal. Recent advances in AI-based echocardiography analysis have shown excellent results on automated estimation of LV volumes and LV ejection fraction. However, from time-varying 2-D echocardiography acquisition, a richer description of cardiac function can be obtained by estimating functional biomarkers from the complete cardiac cycle. In this work we propose for the first time an AI approach for deriving advanced biomarkers of systolic and diastolic LV function from 2-D echocardiography based on segmentations of the full cardiac cycle. These biomarkers will allow clinicians to obtain a much richer picture of the heart in health and disease. The AI model is based on the 'nn-Unet' framework and was trained and tested using four different databases. Results show excellent agreement between manual and automated analysis and showcase the potential of the advanced systolic and diastolic biomarkers for patient stratification. Finally, for a subset of 50 cases, we perform a correlation analysis between clinical biomarkers derived from echocardiography and CMR and we show excellent agreement between the two modalities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.11726v2-abstract-full').style.display = 'none'; document.getElementById('2203.11726v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> MICCAI ASMUS 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.09153">arXiv:2112.09153</a> <span> [<a href="https://arxiv.org/pdf/2112.09153">pdf</a>, <a href="https://arxiv.org/format/2112.09153">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> An Empirical Investigation of the Role of Pre-training in Lifelong Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+S+V">Sanket Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=Patil%2C+D">Darshan Patil</a>, <a href="/search/cs?searchtype=author&query=Chandar%2C+S">Sarath Chandar</a>, <a href="/search/cs?searchtype=author&query=Strubell%2C+E">Emma Strubell</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.09153v2-abstract-short" style="display: inline;"> The lifelong learning paradigm in machine learning is an attractive alternative to the more prominent isolated learning scheme not only due to its resemblance to biological learning but also its potential to reduce energy waste by obviating excessive model re-training. A key challenge to this paradigm is the phenomenon of catastrophic forgetting. With the increasing popularity and success of pre-t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.09153v2-abstract-full').style.display = 'inline'; document.getElementById('2112.09153v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.09153v2-abstract-full" style="display: none;"> The lifelong learning paradigm in machine learning is an attractive alternative to the more prominent isolated learning scheme not only due to its resemblance to biological learning but also its potential to reduce energy waste by obviating excessive model re-training. A key challenge to this paradigm is the phenomenon of catastrophic forgetting. With the increasing popularity and success of pre-trained models in machine learning, we pose the question: What role does pre-training play in lifelong learning, specifically with respect to catastrophic forgetting? We investigate existing methods in the context of large, pre-trained models and evaluate their performance on a variety of text and image classification tasks, including a large-scale study using a novel data set of 15 diverse NLP tasks. Across all settings, we observe that generic pre-training implicitly alleviates the effects of catastrophic forgetting when learning multiple tasks sequentially compared to randomly initialized models. We then further investigate why pre-training alleviates forgetting in this setting. 
We study this phenomenon by analyzing the loss landscape, finding that pre-trained weights appear to ease forgetting by leading to wider minima. Based on this insight, we propose jointly optimizing for current task loss and loss basin sharpness to explicitly encourage wider basins during sequential fine-tuning. We show that this optimization approach outperforms several state-of-the-art task-sequential continual learning algorithms across multiple settings, occasionally even without retaining a memory that scales in size with the number of tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.09153v2-abstract-full').style.display = 'none'; document.getElementById('2112.09153v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Journal of Machine Learning Research 24 (2023) 1-50 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.06868">arXiv:2112.06868</a> <span> [<a href="https://arxiv.org/pdf/2112.06868">pdf</a>, <a href="https://arxiv.org/format/2112.06868">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Variational autoencoders in the presence of low-dimensional data: landscape and implicit bias </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Koehler%2C+F">Frederic Koehler</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Viraj Mehta</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+C">Chenghui Zhou</a>, <a href="/search/cs?searchtype=author&query=Risteski%2C+A">Andrej Risteski</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.06868v2-abstract-short" style="display: inline;"> Variational Autoencoders are one of the most commonly used generative models, particularly for image data. A prominent difficulty in training VAEs is data that is supported on a lower-dimensional manifold. Recent work by Dai and Wipf (2020) proposes a two-stage training algorithm for VAEs, based on a conjecture that in standard VAE training the generator will converge to a solution with 0 variance… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.06868v2-abstract-full').style.display = 'inline'; document.getElementById('2112.06868v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.06868v2-abstract-full" style="display: none;"> Variational Autoencoders are one of the most commonly used generative models, particularly for image data. A prominent difficulty in training VAEs is data that is supported on a lower-dimensional manifold. 
Recent work by Dai and Wipf (2020) proposes a two-stage training algorithm for VAEs, based on a conjecture that in standard VAE training the generator will converge to a solution with 0 variance which is correctly supported on the ground truth manifold. They gave partial support for that conjecture by showing that some optima of the VAE loss do satisfy this property, but did not analyze the training dynamics. In this paper, we show that for linear encoders/decoders, the conjecture is true-that is the VAE training does recover a generator with support equal to the ground truth manifold-and does so due to an implicit bias of gradient descent rather than merely the VAE loss itself. In the nonlinear case, we show that VAE training frequently learns a higher-dimensional manifold which is a superset of the ground truth manifold. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.06868v2-abstract-full').style.display = 'none'; document.getElementById('2112.06868v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted as a conference paper at ICLR 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.05244">arXiv:2112.05244</a> <span> [<a href="https://arxiv.org/pdf/2112.05244">pdf</a>, <a href="https://arxiv.org/format/2112.05244">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> An Experimental Design Perspective on Model-Based Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Viraj Mehta</a>, <a href="/search/cs?searchtype=author&query=Paria%2C+B">Biswajit Paria</a>, <a href="/search/cs?searchtype=author&query=Schneider%2C+J">Jeff Schneider</a>, <a href="/search/cs?searchtype=author&query=Ermon%2C+S">Stefano Ermon</a>, <a href="/search/cs?searchtype=author&query=Neiswanger%2C+W">Willie Neiswanger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.05244v2-abstract-short" style="display: inline;"> In many practical applications of RL, it is expensive to observe state transitions from the environment. 
For example, in the problem of plasma control for nuclear fusion, computing the next state for a given state-action pair requires querying an expensive transition function which can lead to many hours of computer simulation or dollars of scientific research. Such expensive data collection prohi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05244v2-abstract-full').style.display = 'inline'; document.getElementById('2112.05244v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.05244v2-abstract-full" style="display: none;"> In many practical applications of RL, it is expensive to observe state transitions from the environment. For example, in the problem of plasma control for nuclear fusion, computing the next state for a given state-action pair requires querying an expensive transition function which can lead to many hours of computer simulation or dollars of scientific research. Such expensive data collection prohibits application of standard RL algorithms which usually require a large number of observations to learn. In this work, we address the problem of efficiently learning a policy while making a minimal number of state-action queries to the transition function. In particular, we leverage ideas from Bayesian optimal experimental design to guide the selection of state-action queries for efficient learning. We propose an acquisition function that quantifies how much information a state-action pair would provide about the optimal solution to a Markov decision process. At each iteration, our algorithm maximizes this acquisition function, to choose the most informative state-action pair to be queried, thus yielding a data-efficient RL approach. We experiment with a variety of simulated continuous control problems and show that our approach learns an optimal policy with up to $5$ -- $1,000\times$ less data than model-based RL baselines and $10^3$ -- $10^5\times$ less data than model-free RL baselines. We also provide several ablated comparisons which point to substantial improvements arising from the principled method of obtaining data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05244v2-abstract-full').style.display = 'none'; document.getElementById('2112.05244v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. 
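<p class="is-size-7">The abstract above selects state-action queries by maximizing an acquisition function over a learned model of the transition function. The Python sketch below shows only the general acquire-query-refit loop under simplifying assumptions: a scikit-learn Gaussian process stands in for the dynamics model and predictive standard deviation stands in for the paper's information-gain acquisition, so this is a schematic of the loop rather than the proposed method itself.</p>
<pre><code>
# Schematic of a query-efficient loop: fit a model of the transition function, score
# candidate (state, action) pairs with an acquisition function, and query only the best one.
# The GP model and variance-based acquisition are simple stand-ins, not the paper's method.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

def true_transition(sa):                  # expensive simulator (toy 1-D dynamics for illustration)
    s, a = sa
    return 0.9 * s + 0.1 * np.tanh(a)

rng = np.random.default_rng(0)
queried_sa = [rng.uniform(-1, 1, size=2) for _ in range(3)]       # tiny seed dataset
queried_next = [true_transition(sa) for sa in queried_sa]

for _ in range(20):                                               # query budget
    gp = GaussianProcessRegressor().fit(np.array(queried_sa), np.array(queried_next))
    candidates = rng.uniform(-1, 1, size=(256, 2))                # candidate (s, a) pairs
    _, std = gp.predict(candidates, return_std=True)
    best = candidates[np.argmax(std)]                             # most "informative" under this proxy
    queried_sa.append(best)
    queried_next.append(true_transition(best))                    # one expensive query per iteration
</code></pre>
<p class="is-size-7">In the paper the acquisition instead quantifies information about the optimal policy of the Markov decision process; the loop structure is the only part this sketch is meant to convey.</p>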
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Conference paper at ICLR 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.10952">arXiv:2111.10952</a> <span> [<a href="https://arxiv.org/pdf/2111.10952">pdf</a>, <a href="https://arxiv.org/format/2111.10952">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> ExT5: Towards Extreme Multi-Task Scaling for Transfer Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Aribandi%2C+V">Vamsi Aribandi</a>, <a href="/search/cs?searchtype=author&query=Tay%2C+Y">Yi Tay</a>, <a href="/search/cs?searchtype=author&query=Schuster%2C+T">Tal Schuster</a>, <a href="/search/cs?searchtype=author&query=Rao%2C+J">Jinfeng Rao</a>, <a href="/search/cs?searchtype=author&query=Zheng%2C+H+S">Huaixiu Steven Zheng</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+S+V">Sanket Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=Zhuang%2C+H">Honglei Zhuang</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+V+Q">Vinh Q. Tran</a>, <a href="/search/cs?searchtype=author&query=Bahri%2C+D">Dara Bahri</a>, <a href="/search/cs?searchtype=author&query=Ni%2C+J">Jianmo Ni</a>, <a href="/search/cs?searchtype=author&query=Gupta%2C+J">Jai Gupta</a>, <a href="/search/cs?searchtype=author&query=Hui%2C+K">Kai Hui</a>, <a href="/search/cs?searchtype=author&query=Ruder%2C+S">Sebastian Ruder</a>, <a href="/search/cs?searchtype=author&query=Metzler%2C+D">Donald Metzler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.10952v2-abstract-short" style="display: inline;"> Despite the recent success of multi-task learning and transfer learning for natural language processing (NLP), few works have systematically studied the effect of scaling up the number of tasks during pre-training. Towards this goal, this paper introduces ExMix (Extreme Mixture): a massive collection of 107 supervised NLP tasks across diverse domains and task-families. Using ExMix, we study the ef… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.10952v2-abstract-full').style.display = 'inline'; document.getElementById('2111.10952v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.10952v2-abstract-full" style="display: none;"> Despite the recent success of multi-task learning and transfer learning for natural language processing (NLP), few works have systematically studied the effect of scaling up the number of tasks during pre-training. Towards this goal, this paper introduces ExMix (Extreme Mixture): a massive collection of 107 supervised NLP tasks across diverse domains and task-families. Using ExMix, we study the effect of multi-task pre-training at the largest scale to date, and analyze co-training transfer amongst common families of tasks. 
Through this analysis, we show that manually curating an ideal set of tasks for multi-task pre-training is not straightforward, and that multi-task scaling can vastly improve models on its own. Finally, we propose ExT5: a model pre-trained using a multi-task objective of self-supervised span denoising and supervised ExMix. Via extensive experiments, we show that ExT5 outperforms strong T5 baselines on SuperGLUE, GEM, Rainbow, Closed-Book QA tasks, and several tasks outside of ExMix. ExT5 also significantly improves sample efficiency while pre-training. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.10952v2-abstract-full').style.display = 'none'; document.getElementById('2111.10952v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICLR 2022; see https://youtu.be/FbRcbM4T-50 for a video overview of the paper</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.08467">arXiv:2110.08467</a> <span> [<a href="https://arxiv.org/pdf/2110.08467">pdf</a>, <a href="https://arxiv.org/format/2110.08467">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Improving Compositional Generalization with Self-Training for Data-to-Text Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+S+V">Sanket Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=Rao%2C+J">Jinfeng Rao</a>, <a href="/search/cs?searchtype=author&query=Tay%2C+Y">Yi Tay</a>, <a href="/search/cs?searchtype=author&query=Kale%2C+M">Mihir Kale</a>, <a href="/search/cs?searchtype=author&query=Parikh%2C+A+P">Ankur P. Parikh</a>, <a href="/search/cs?searchtype=author&query=Strubell%2C+E">Emma Strubell</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.08467v2-abstract-short" style="display: inline;"> Data-to-text generation focuses on generating fluent natural language responses from structured meaning representations (MRs). Such representations are compositional and it is costly to collect responses for all possible combinations of atomic meaning schemata, thereby necessitating few-shot generalization to novel MRs. 
In this work, we systematically study the compositional generalization of the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.08467v2-abstract-full').style.display = 'inline'; document.getElementById('2110.08467v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.08467v2-abstract-full" style="display: none;"> Data-to-text generation focuses on generating fluent natural language responses from structured meaning representations (MRs). Such representations are compositional and it is costly to collect responses for all possible combinations of atomic meaning schemata, thereby necessitating few-shot generalization to novel MRs. In this work, we systematically study the compositional generalization of the state-of-the-art T5 models in few-shot data-to-text tasks. We show that T5 models fail to generalize to unseen MRs, and we propose a template-based input representation that considerably improves the model's generalization capability. To further improve the model's performance, we propose an approach based on self-training using fine-tuned BLEURT for pseudo response selection. On the commonly-used SGD and Weather benchmarks, the proposed self-training approach improves tree accuracy by 46%+ and reduces the slot error rates by 73%+ over the strong T5 baselines in few-shot settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.08467v2-abstract-full').style.display = 'none'; document.getElementById('2110.08467v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
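<p class="is-size-7">The abstract above combines self-training with pseudo-response selection. The Python sketch below outlines that loop with placeholder callables: <code>generate_fn</code>, <code>score_fn</code>, and <code>finetune_fn</code> are hypothetical stand-ins for a fine-tuned T5 generator, a fine-tuned BLEURT scorer, and a retraining routine, and the threshold, round count, and candidate count are illustrative rather than the paper's settings.</p>
<pre><code>
# Sketch of a self-training loop with pseudo-response selection: generate candidate
# responses for unlabeled meaning representations (MRs), keep only those the learned
# scorer rates highly, and retrain on the accumulated pseudo-labeled pairs.
from typing import Callable, Iterable

def self_train(generate_fn: Callable[[str, int], list],
               score_fn: Callable[[str, str], float],
               finetune_fn: Callable[[list], None],
               unlabeled_mrs: Iterable[str],
               rounds: int = 3,
               num_candidates: int = 8,
               threshold: float = 0.7) -> list:
    pseudo_data = []
    for _ in range(rounds):
        for mr in unlabeled_mrs:
            candidates = generate_fn(mr, num_candidates)          # sample responses for this MR
            scored = [(score_fn(mr, resp), resp) for resp in candidates]
            best_score, best_resp = max(scored)
            if best_score >= threshold:                           # pseudo-response selection
                pseudo_data.append((mr, best_resp))
        finetune_fn(pseudo_data)                                  # retrain on the accumulated pseudo pairs
    return pseudo_data
</code></pre>
<p class="is-size-7">The few-shot gold data would typically be mixed back in inside <code>finetune_fn</code>; how candidates are sampled and filtered in the paper may differ from this sketch.</p>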
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at ACL 2022 main conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.01406">arXiv:2110.01406</a> <span> [<a href="https://arxiv.org/pdf/2110.01406">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Performance">cs.PF</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s42256-023-00652-2">10.1038/s42256-023-00652-2 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> MedPerf: Open Benchmarking Platform for Medical Artificial Intelligence using Federated Evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Karargyris%2C+A">Alexandros Karargyris</a>, <a href="/search/cs?searchtype=author&query=Umeton%2C+R">Renato Umeton</a>, <a href="/search/cs?searchtype=author&query=Sheller%2C+M+J">Micah J. Sheller</a>, <a href="/search/cs?searchtype=author&query=Aristizabal%2C+A">Alejandro Aristizabal</a>, <a href="/search/cs?searchtype=author&query=George%2C+J">Johnu George</a>, <a href="/search/cs?searchtype=author&query=Bala%2C+S">Srini Bala</a>, <a href="/search/cs?searchtype=author&query=Beutel%2C+D+J">Daniel J. Beutel</a>, <a href="/search/cs?searchtype=author&query=Bittorf%2C+V">Victor Bittorf</a>, <a href="/search/cs?searchtype=author&query=Chaudhari%2C+A">Akshay Chaudhari</a>, <a href="/search/cs?searchtype=author&query=Chowdhury%2C+A">Alexander Chowdhury</a>, <a href="/search/cs?searchtype=author&query=Coleman%2C+C">Cody Coleman</a>, <a href="/search/cs?searchtype=author&query=Desinghu%2C+B">Bala Desinghu</a>, <a href="/search/cs?searchtype=author&query=Diamos%2C+G">Gregory Diamos</a>, <a href="/search/cs?searchtype=author&query=Dutta%2C+D">Debo Dutta</a>, <a href="/search/cs?searchtype=author&query=Feddema%2C+D">Diane Feddema</a>, <a href="/search/cs?searchtype=author&query=Fursin%2C+G">Grigori Fursin</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+J">Junyi Guo</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xinyuan Huang</a>, <a href="/search/cs?searchtype=author&query=Kanter%2C+D">David Kanter</a>, <a href="/search/cs?searchtype=author&query=Kashyap%2C+S">Satyananda Kashyap</a>, <a href="/search/cs?searchtype=author&query=Lane%2C+N">Nicholas Lane</a>, <a href="/search/cs?searchtype=author&query=Mallick%2C+I">Indranil Mallick</a>, <a href="/search/cs?searchtype=author&query=Mascagni%2C+P">Pietro Mascagni</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Virendra Mehta</a>, <a href="/search/cs?searchtype=author&query=Natarajan%2C+V">Vivek Natarajan</a> , et al. 
(17 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.01406v3-abstract-short" style="display: inline;"> Medical AI has tremendous potential to advance healthcare by supporting the evidence-based practice of medicine, personalizing patient treatment, reducing costs, and improving provider and patient experience. We argue that unlocking this potential requires a systematic way to measure the performance of medical AI models on large-scale heterogeneous data. To meet this need, we are building MedPerf,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.01406v3-abstract-full').style.display = 'inline'; document.getElementById('2110.01406v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.01406v3-abstract-full" style="display: none;"> Medical AI has tremendous potential to advance healthcare by supporting the evidence-based practice of medicine, personalizing patient treatment, reducing costs, and improving provider and patient experience. We argue that unlocking this potential requires a systematic way to measure the performance of medical AI models on large-scale heterogeneous data. To meet this need, we are building MedPerf, an open framework for benchmarking machine learning in the medical domain. MedPerf will enable federated evaluation in which models are securely distributed to different facilities for evaluation, thereby empowering healthcare organizations to assess and verify the performance of AI models in an efficient and human-supervised process, while prioritizing privacy. We describe the current challenges healthcare and AI communities face, the need for an open platform, the design philosophy of MedPerf, its current implementation status, and our roadmap. We call for researchers and organizations to join us in creating the MedPerf open benchmarking platform. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.01406v3-abstract-full').style.display = 'none'; document.getElementById('2110.01406v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.10641">arXiv:2109.10641</a> <span> [<a href="https://arxiv.org/pdf/2109.10641">pdf</a>, <a href="https://arxiv.org/format/2109.10641">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Uncertainty-Aware Training for Cardiac Resynchronisation Therapy Response Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Dawood%2C+T">Tareen Dawood</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+C">Chen Chen</a>, <a href="/search/cs?searchtype=author&query=Andlauer%2C+R">Robin Andlauer</a>, <a href="/search/cs?searchtype=author&query=Sidhu%2C+B+S">Baldeep S. Sidhu</a>, <a href="/search/cs?searchtype=author&query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&query=Gould%2C+J">Justin Gould</a>, <a href="/search/cs?searchtype=author&query=Porter%2C+B">Bradley Porter</a>, <a href="/search/cs?searchtype=author&query=Elliott%2C+M">Mark Elliott</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Vishal Mehta</a>, <a href="/search/cs?searchtype=author&query=Rinaldi%2C+C+A">C. Aldo Rinaldi</a>, <a href="/search/cs?searchtype=author&query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.10641v1-abstract-short" style="display: inline;"> Evaluation of predictive deep learning (DL) models beyond conventional performance metrics has become increasingly important for applications in sensitive environments like healthcare. Such models might have the capability to encode and analyse large sets of data but they often lack comprehensive interpretability methods, preventing clinical trust in predictive outcomes. Quantifying uncertainty of… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.10641v1-abstract-full').style.display = 'inline'; document.getElementById('2109.10641v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.10641v1-abstract-full" style="display: none;"> Evaluation of predictive deep learning (DL) models beyond conventional performance metrics has become increasingly important for applications in sensitive environments like healthcare. Such models might have the capability to encode and analyse large sets of data but they often lack comprehensive interpretability methods, preventing clinical trust in predictive outcomes. Quantifying uncertainty of a prediction is one way to provide such interpretability and promote trust. However, relatively little attention has been paid to how to include such requirements into the training of the model.

In this paper we: (i) quantify the data (aleatoric) and model (epistemic) uncertainty of a DL model for Cardiac Resynchronisation Therapy response prediction from cardiac magnetic resonance images, and (ii) propose and perform a preliminary investigation of an uncertainty-aware loss function that can be used to retrain an existing DL image-based classification model to encourage confidence in correct predictions and reduce confidence in incorrect predictions. Our initial results are promising, showing a significant increase in the (epistemic) confidence of true positive predictions, with some evidence of a reduction in false negative confidence. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.10641v1-abstract-full').style.display = 'none'; document.getElementById('2109.10641v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">STACOM 2021 Workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.08414">arXiv:2102.08414</a> <span> [<a href="https://arxiv.org/pdf/2102.08414">pdf</a>, <a href="https://arxiv.org/format/2102.08414">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/mnras/stab2093">10.1093/mnras/stab2093 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Galaxy Zoo DECaLS: Detailed Visual Morphology Measurements from Volunteers and Deep Learning for 314,000 Galaxies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&query=Lintott%2C+C">Chris Lintott</a>, <a href="/search/cs?searchtype=author&query=Geron%2C+T">Tobias Geron</a>, <a href="/search/cs?searchtype=author&query=Kruk%2C+S">Sandor Kruk</a>, <a href="/search/cs?searchtype=author&query=Krawczyk%2C+C">Coleman Krawczyk</a>, <a href="/search/cs?searchtype=author&query=Willett%2C+K+W">Kyle W. Willett</a>, <a href="/search/cs?searchtype=author&query=Bamford%2C+S">Steven Bamford</a>, <a href="/search/cs?searchtype=author&query=Kelvin%2C+L+S">Lee S. Kelvin</a>, <a href="/search/cs?searchtype=author&query=Fortson%2C+L">Lucy Fortson</a>, <a href="/search/cs?searchtype=author&query=Gal%2C+Y">Yarin Gal</a>, <a href="/search/cs?searchtype=author&query=Keel%2C+W">William Keel</a>, <a href="/search/cs?searchtype=author&query=Masters%2C+K+L">Karen L. Masters</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Vihang Mehta</a>, <a href="/search/cs?searchtype=author&query=Simmons%2C+B+D">Brooke D. 
Simmons</a>, <a href="/search/cs?searchtype=author&query=Smethurst%2C+R">Rebecca Smethurst</a>, <a href="/search/cs?searchtype=author&query=Smith%2C+L">Lewis Smith</a>, <a href="/search/cs?searchtype=author&query=Baeten%2C+E+M">Elisabeth M. Baeten</a>, <a href="/search/cs?searchtype=author&query=Macmillan%2C+C">Christine Macmillan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.08414v2-abstract-short" style="display: inline;"> We present Galaxy Zoo DECaLS: detailed visual morphological classifications for Dark Energy Camera Legacy Survey images of galaxies within the SDSS DR8 footprint. Deeper DECaLS images (r=23.6 vs. r=22.2 from SDSS) reveal spiral arms, weak bars, and tidal features not previously visible in SDSS imaging. To best exploit the greater depth of DECaLS images, volunteers select from a new set of answers… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.08414v2-abstract-full').style.display = 'inline'; document.getElementById('2102.08414v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.08414v2-abstract-full" style="display: none;"> We present Galaxy Zoo DECaLS: detailed visual morphological classifications for Dark Energy Camera Legacy Survey images of galaxies within the SDSS DR8 footprint. Deeper DECaLS images (r=23.6 vs. r=22.2 from SDSS) reveal spiral arms, weak bars, and tidal features not previously visible in SDSS imaging. To best exploit the greater depth of DECaLS images, volunteers select from a new set of answers designed to improve our sensitivity to mergers and bars. Galaxy Zoo volunteers provide 7.5 million individual classifications over 314,000 galaxies. 140,000 galaxies receive at least 30 classifications, sufficient to accurately measure detailed morphology like bars, and the remainder receive approximately 5. All classifications are used to train an ensemble of Bayesian convolutional neural networks (a state-of-the-art deep learning method) to predict posteriors for the detailed morphology of all 314,000 galaxies. When measured against confident volunteer classifications, the networks are approximately 99% accurate on every question. Morphology is a fundamental feature of every galaxy; our human and machine classifications are an accurate and detailed resource for understanding how galaxies evolve. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.08414v2-abstract-full').style.display = 'none'; document.getElementById('2102.08414v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by MNRAS July '21. Open access DOI below. Data at https://doi.org/10.5281/zenodo.4196266. Code at https://www.github.com/mwalmsley/zoobot. Docs at https://zoobot.readthedocs.io/. 
Interactive viewer at https://share.streamlit.io/mwalmsley/galaxy-poster/gz_decals_mike_walmsley.py</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.03321">arXiv:2101.03321</a> <span> [<a href="https://arxiv.org/pdf/2101.03321">pdf</a>, <a href="https://arxiv.org/format/2101.03321">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> FakeBuster: A DeepFakes Detection Tool for Video Conferencing Scenarios </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Vineet Mehta</a>, <a href="/search/cs?searchtype=author&query=Gupta%2C+P">Parul Gupta</a>, <a href="/search/cs?searchtype=author&query=Subramanian%2C+R">Ramanathan Subramanian</a>, <a href="/search/cs?searchtype=author&query=Dhall%2C+A">Abhinav Dhall</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.03321v1-abstract-short" style="display: inline;"> This paper proposes a new DeepFake detector FakeBuster for detecting impostors during video conferencing and manipulated faces on social media. FakeBuster is a standalone deep learning based solution, which enables a user to detect if another person's video is manipulated or spoofed during a video conferencing based meeting. This tool is independent of video conferencing solutions and has been tes… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.03321v1-abstract-full').style.display = 'inline'; document.getElementById('2101.03321v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.03321v1-abstract-full" style="display: none;"> This paper proposes a new DeepFake detector FakeBuster for detecting impostors during video conferencing and manipulated faces on social media. FakeBuster is a standalone deep learning based solution, which enables a user to detect if another person's video is manipulated or spoofed during a video conferencing based meeting. This tool is independent of video conferencing solutions and has been tested with Zoom and Skype applications. It uses a 3D convolutional neural network for predicting video segment-wise fakeness scores. The network is trained on a combination of datasets such as Deeperforensics, DFDC, VoxCeleb, and deepfake videos created using locally captured (for video conferencing scenarios) images. This leads to different environments and perturbations in the dataset, which improves the generalization of the deepfake network. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.03321v1-abstract-full').style.display = 'none'; document.getElementById('2101.03321v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. 
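<p class="is-size-7">The abstract above mentions predicting video segment-wise fakeness scores with a 3D convolutional network. The Python sketch below (assuming PyTorch) shows one plausible way to score overlapping segments with a small untrained 3D-CNN; the architecture, window length, and stride are illustrative assumptions, not the FakeBuster network.</p>
<pre><code>
# Sketch of segment-wise scoring: slide a fixed-length window over the frame sequence
# and let a small 3D-CNN assign each segment a fakeness score. Untrained weights here,
# just to show the interface; the real network and settings are not reproduced.
import torch

class SegmentScorer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.features = torch.nn.Sequential(
            torch.nn.Conv3d(3, 16, kernel_size=3, padding=1), torch.nn.ReLU(),
            torch.nn.AdaptiveAvgPool3d(1))
        self.head = torch.nn.Linear(16, 1)

    def forward(self, clip):                        # clip: (batch, 3, frames, H, W)
        h = self.features(clip).flatten(1)
        return torch.sigmoid(self.head(h))          # fakeness score in [0, 1]

def score_video(frames, window=16, stride=8):
    """frames: tensor (3, T, H, W); returns one score per overlapping segment."""
    model = SegmentScorer().eval()
    scores = []
    with torch.no_grad():
        for start in range(0, frames.shape[1] - window + 1, stride):
            clip = frames[:, start:start + window].unsqueeze(0)
            scores.append(model(clip).item())
    return scores

print(score_video(torch.rand(3, 64, 64, 64)))
</code></pre>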
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 Pages, 3 Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.02500">arXiv:2010.02500</a> <span> [<a href="https://arxiv.org/pdf/2010.02500">pdf</a>, <a href="https://arxiv.org/format/2010.02500">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Efficient Meta Lifelong-Learning with Limited Memory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wang%2C+Z">Zirui Wang</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+S+V">Sanket Vaibhav Mehta</a>, <a href="/search/cs?searchtype=author&query=P%C3%B3czos%2C+B">Barnab谩s P贸czos</a>, <a href="/search/cs?searchtype=author&query=Carbonell%2C+J">Jaime Carbonell</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.02500v1-abstract-short" style="display: inline;"> Current natural language processing models work well on a single task, yet they often fail to continuously learn new tasks without forgetting previous ones as they are re-trained throughout their lifetime, a challenge known as lifelong learning. State-of-the-art lifelong language learning methods store past examples in episodic memory and replay them at both training and inference time. However, a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.02500v1-abstract-full').style.display = 'inline'; document.getElementById('2010.02500v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.02500v1-abstract-full" style="display: none;"> Current natural language processing models work well on a single task, yet they often fail to continuously learn new tasks without forgetting previous ones as they are re-trained throughout their lifetime, a challenge known as lifelong learning. State-of-the-art lifelong language learning methods store past examples in episodic memory and replay them at both training and inference time. However, as we show later in our experiments, there are three significant impediments: (1) needing unrealistically large memory module to achieve good performance, (2) suffering from negative transfer, (3) requiring multiple local adaptation steps for each test example that significantly slows down the inference speed. In this paper, we identify three common principles of lifelong learning methods and propose an efficient meta-lifelong framework that combines them in a synergistic fashion. To achieve sample efficiency, our method trains the model in a manner that it learns a better initialization for local adaptation. Extensive experiments on text classification and question answering benchmarks demonstrate the effectiveness of our framework by achieving state-of-the-art performance using merely 1% memory size and narrowing the gap with multi-task learning. We further show that our method alleviates both catastrophic forgetting and negative transfer at the same time. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.02500v1-abstract-full').style.display = 'none'; document.getElementById('2010.02500v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published as a main conference paper at EMNLP 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.01155">arXiv:2010.01155</a> <span> [<a href="https://arxiv.org/pdf/2010.01155">pdf</a>, <a href="https://arxiv.org/format/2010.01155">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Representational aspects of depth and conditioning in normalizing flows </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Koehler%2C+F">Frederic Koehler</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Viraj Mehta</a>, <a href="/search/cs?searchtype=author&query=Risteski%2C+A">Andrej Risteski</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.01155v2-abstract-short" style="display: inline;"> Normalizing flows are among the most popular paradigms in generative modeling, especially for images, primarily because we can efficiently evaluate the likelihood of a data point. This is desirable both for evaluating the fit of a model, and for ease of training, as maximizing the likelihood can be done by gradient descent. However, training normalizing flows comes with difficulties as well: model… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.01155v2-abstract-full').style.display = 'inline'; document.getElementById('2010.01155v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.01155v2-abstract-full" style="display: none;"> Normalizing flows are among the most popular paradigms in generative modeling, especially for images, primarily because we can efficiently evaluate the likelihood of a data point. This is desirable both for evaluating the fit of a model, and for ease of training, as maximizing the likelihood can be done by gradient descent. However, training normalizing flows comes with difficulties as well: models which produce good samples typically need to be extremely deep -- which comes with accompanying vanishing/exploding gradient problems. A very related problem is that they are often poorly conditioned: since they are parametrized as invertible maps from $\mathbb{R}^d \to \mathbb{R}^d$, and typical training data like images intuitively is lower-dimensional, the learned maps often have Jacobians that are close to being singular. 
In our paper, we tackle representational aspects around depth and conditioning of normalizing flows: both for general invertible architectures, and for a particular common architecture, affine couplings. We prove that $Θ(1)$ affine coupling layers suffice to exactly represent a permutation or $1 \times 1$ convolution, as used in GLOW, showing that representationally the choice of partition is not a bottleneck for depth. We also show that shallow affine coupling networks are universal approximators in Wasserstein distance if ill-conditioning is allowed, and experimentally investigate related phenomena involving padding. Finally, we show a depth lower bound for general flow architectures with few neurons per layer and bounded Lipschitz constant. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.01155v2-abstract-full').style.display = 'none'; document.getElementById('2010.01155v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Appeared in ICML 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.13811">arXiv:2006.13811</a> <span> [<a href="https://arxiv.org/pdf/2006.13811">pdf</a>, <a href="https://arxiv.org/format/2006.13811">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-59710-8_28">10.1007/978-3-030-59710-8_28 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Interpretable Deep Models for Cardiac Resynchronisation Therapy Response Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+C">Chen Chen</a>, <a href="/search/cs?searchtype=author&query=Clough%2C+J+R">James R. Clough</a>, <a href="/search/cs?searchtype=author&query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&query=Sidhu%2C+B+S">Baldeep S. Sidhu</a>, <a href="/search/cs?searchtype=author&query=Gould%2C+J">Justin Gould</a>, <a href="/search/cs?searchtype=author&query=Porter%2C+B">Bradley Porter</a>, <a href="/search/cs?searchtype=author&query=Elliott%2C+M">Mark Elliott</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Vishal Mehta</a>, <a href="/search/cs?searchtype=author&query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&query=Rinaldi%2C+C+A">Christopher A.
Rinaldi</a>, <a href="/search/cs?searchtype=author&query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.13811v2-abstract-short" style="display: inline;"> Advances in deep learning (DL) have resulted in impressive accuracy in some medical image classification tasks, but often deep models lack interpretability. The ability of these models to explain their decisions is important for fostering clinical trust and facilitating clinical translation. Furthermore, for many problems in medicine there is a wealth of existing clinical knowledge to draw upon, w… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.13811v2-abstract-full').style.display = 'inline'; document.getElementById('2006.13811v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.13811v2-abstract-full" style="display: none;"> Advances in deep learning (DL) have resulted in impressive accuracy in some medical image classification tasks, but often deep models lack interpretability. The ability of these models to explain their decisions is important for fostering clinical trust and facilitating clinical translation. Furthermore, for many problems in medicine there is a wealth of existing clinical knowledge to draw upon, which may be useful in generating explanations, but it is not obvious how this knowledge can be encoded into DL models - most models are learnt either from scratch or using transfer learning from a different domain. In this paper we address both of these issues. We propose a novel DL framework for image-based classification based on a variational autoencoder (VAE). The framework allows prediction of the output of interest from the latent space of the autoencoder, as well as visualisation (in the image domain) of the effects of crossing the decision boundary, thus enhancing the interpretability of the classifier. Our key contribution is that the VAE disentangles the latent space based on `explanations' drawn from existing clinical knowledge. The framework can predict outputs as well as explanations for these outputs, and also raises the possibility of discovering new biomarkers that are separate (or disentangled) from the existing knowledge. We demonstrate our framework on the problem of predicting response of patients with cardiomyopathy to cardiac resynchronization therapy (CRT) from cine cardiac magnetic resonance images. The sensitivity and specificity of the proposed model on the task of CRT response prediction are 88.43% and 84.39% respectively, and we showcase the potential of our model in enhancing understanding of the factors contributing to CRT response. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.13811v2-abstract-full').style.display = 'none'; document.getElementById('2006.13811v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. 
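<p class="is-size-7">The abstract above predicts the output of interest directly from a VAE's latent space. The Python sketch below (assuming PyTorch) shows that basic architecture: an encoder, a decoder, and a classifier head on the latent code, trained with a reconstruction + KL + classification loss. Layer sizes and loss weights are illustrative, and the paper's clinically guided disentanglement of the latent space is not reproduced here.</p>
<pre><code>
# Minimal sketch of a VAE whose latent code is also fed to a classifier, so the
# prediction can be made (and visualised) in the latent space. Illustrative only.
import torch

class LatentClassifierVAE(torch.nn.Module):
    def __init__(self, in_dim=1024, latent_dim=16, n_classes=2):
        super().__init__()
        self.enc = torch.nn.Linear(in_dim, 2 * latent_dim)       # outputs mean and log-variance
        self.dec = torch.nn.Linear(latent_dim, in_dim)
        self.clf = torch.nn.Linear(latent_dim, n_classes)        # prediction from the latent space

    def forward(self, x):
        mu, logvar = self.enc(x).chunk(2, dim=-1)
        z = mu + torch.randn_like(mu) * (0.5 * logvar).exp()     # reparameterization trick
        return self.dec(z), self.clf(mu), mu, logvar

def loss_fn(model, x, y, beta=1.0, lam=1.0):
    recon, logits, mu, logvar = model(x)
    rec = torch.nn.functional.mse_loss(recon, x)                 # reconstruction term
    kl = 0.5 * (mu ** 2 + logvar.exp() - logvar - 1).sum(dim=-1).mean()
    ce = torch.nn.functional.cross_entropy(logits, y)            # response-prediction term
    return rec + beta * kl + lam * ce
</code></pre>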
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">MICCAI 2020 conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.12682">arXiv:2006.12682</a> <span> [<a href="https://arxiv.org/pdf/2006.12682">pdf</a>, <a href="https://arxiv.org/format/2006.12682">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/CDC45484.2021.9682807">10.1109/CDC45484.2021.9682807 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Neural Dynamical Systems: Balancing Structure and Flexibility in Physical Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Viraj Mehta</a>, <a href="/search/cs?searchtype=author&query=Char%2C+I">Ian Char</a>, <a href="/search/cs?searchtype=author&query=Neiswanger%2C+W">Willie Neiswanger</a>, <a href="/search/cs?searchtype=author&query=Chung%2C+Y">Youngseog Chung</a>, <a href="/search/cs?searchtype=author&query=Nelson%2C+A+O">Andrew Oakleigh Nelson</a>, <a href="/search/cs?searchtype=author&query=Boyer%2C+M+D">Mark D Boyer</a>, <a href="/search/cs?searchtype=author&query=Kolemen%2C+E">Egemen Kolemen</a>, <a href="/search/cs?searchtype=author&query=Schneider%2C+J">Jeff Schneider</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.12682v2-abstract-short" style="display: inline;"> We introduce Neural Dynamical Systems (NDS), a method of learning dynamical models in various gray-box settings which incorporates prior knowledge in the form of systems of ordinary differential equations. NDS uses neural networks to estimate free parameters of the system, predicts residual terms, and numerically integrates over time to predict future states. A key insight is that many real dynami… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.12682v2-abstract-full').style.display = 'inline'; document.getElementById('2006.12682v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.12682v2-abstract-full" style="display: none;"> We introduce Neural Dynamical Systems (NDS), a method of learning dynamical models in various gray-box settings which incorporates prior knowledge in the form of systems of ordinary differential equations. NDS uses neural networks to estimate free parameters of the system, predicts residual terms, and numerically integrates over time to predict future states. A key insight is that many real dynamical systems of interest are hard to model because the dynamics may vary across rollouts. We mitigate this problem by taking a trajectory of prior states as the input to NDS and train it to dynamically estimate system parameters using the preceding trajectory. 
We find that NDS learns dynamics with higher accuracy and fewer samples than a variety of deep learning methods that do not incorporate the prior knowledge and methods from the system identification literature which do. We demonstrate these advantages first on synthetic dynamical systems and then on real data captured from deuterium shots from a nuclear fusion reactor. Finally, we demonstrate that these benefits can be utilized for control in small-scale experiments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.12682v2-abstract-full').style.display = 'none'; document.getElementById('2006.12682v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.05899">arXiv:2005.05899</a> <span> [<a href="https://arxiv.org/pdf/2005.05899">pdf</a>, <a href="https://arxiv.org/format/2005.05899">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Engineering, Finance, and Science">cs.CE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.future.2020.01.045">10.1016/j.future.2020.01.045 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Heterogeneous CPU/GPU co-execution of CFD simulations on the POWER9 architecture: Application to airplane aerodynamics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Borrell%2C+R">R. Borrell</a>, <a href="/search/cs?searchtype=author&query=Dosimont%2C+D">D. Dosimont</a>, <a href="/search/cs?searchtype=author&query=Garcia-Gasulla%2C+M">M. Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&query=Houzeaux%2C+G">G. Houzeaux</a>, <a href="/search/cs?searchtype=author&query=Lehmkuhl%2C+O">O. Lehmkuhl</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+V">V. Mehta</a>, <a href="/search/cs?searchtype=author&query=Owen%2C+H">H. Owen</a>, <a href="/search/cs?searchtype=author&query=Vazquez%2C+M">M. Vazquez</a>, <a href="/search/cs?searchtype=author&query=Oyarzun%2C+G">G. Oyarzun</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.05899v3-abstract-short" style="display: inline;"> High fidelity Computational Fluid Dynamics simulations are generally associated with large computing requirements, which are progressively acute with each new generation of supercomputers. However, significant research efforts are required to unlock the computing power of leading-edge systems, currently referred to as pre-Exascale systems, based on increasingly complex architectures. 
In this paper… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.05899v3-abstract-full').style.display = 'inline'; document.getElementById('2005.05899v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.05899v3-abstract-full" style="display: none;"> High fidelity Computational Fluid Dynamics simulations are generally associated with large computing requirements, which are progressively acute with each new generation of supercomputers. However, significant research efforts are required to unlock the computing power of leading-edge systems, currently referred to as pre-Exascale systems, based on increasingly complex architectures. In this paper, we present the approach implemented in the computational mechanics code Alya. We describe in detail the parallelization strategy implemented to fully exploit the different levels of parallelism, together with a novel co-execution method for the efficient utilization of heterogeneous CPU/GPU architectures. The latter is based on a multi-code co-execution approach with a dynamic load balancing mechanism. The assessment of the performance of all the proposed strategies has been carried out for airplane simulations on the POWER9 architecture accelerated with NVIDIA Volta V100 GPUs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.05899v3-abstract-full').style.display = 'none'; document.getElementById('2005.05899v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Future Generation Computer Systems, Volume 107, 2020,Pages 31-48 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.08352">arXiv:2004.08352</a> <span> [<a href="https://arxiv.org/pdf/2004.08352">pdf</a>, <a href="https://arxiv.org/format/2004.08352">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ICPR48806.2021.9412632">10.1109/ICPR48806.2021.9412632 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Motion and Region Aware Adversarial Learning for Fall Detection with Thermal Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+V">Vineet Mehta</a>, <a href="/search/cs?searchtype=author&query=Dhall%2C+A">Abhinav Dhall</a>, <a href="/search/cs?searchtype=author&query=Pal%2C+S">Sujata Pal</a>, <a href="/search/cs?searchtype=author&query=Khan%2C+S+S">Shehroz S. 
Khan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.08352v2-abstract-short" style="display: inline;"> Automatic fall detection is a vital technology for ensuring the health and safety of people. Home-based camera systems for fall detection often put people's privacy at risk. Thermal cameras can partially or fully obfuscate facial features, thus preserving the privacy of a person. Another challenge is the less occurrence of falls in comparison to the normal activities of daily living. As fall occur… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.08352v2-abstract-full').style.display = 'inline'; document.getElementById('2004.08352v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.08352v2-abstract-full" style="display: none;"> Automatic fall detection is a vital technology for ensuring the health and safety of people. Home-based camera systems for fall detection often put people's privacy at risk. Thermal cameras can partially or fully obfuscate facial features, thus preserving the privacy of a person. Another challenge is the less occurrence of falls in comparison to the normal activities of daily living. As fall occurs rarely, it is non-trivial to learn algorithms due to class imbalance. To handle these problems, we formulate fall detection as an anomaly detection within an adversarial framework using thermal imaging. We present a novel adversarial network that comprises of two-channel 3D convolutional autoencoders which reconstructs the thermal data and the optical flow input sequences respectively. We introduce a technique to track the region of interest, a region-based difference constraint, and a joint discriminator to compute the reconstruction error. A larger reconstruction error indicates the occurrence of a fall. The experiments on a publicly available thermal fall dataset show the superior results obtained compared to the standard baseline. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.08352v2-abstract-full').style.display = 'none'; document.getElementById('2004.08352v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. 
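<p class="is-size-7">The abstract above frames fall detection as anomaly detection via reconstruction error. The Python sketch below (assuming PyTorch) shows that general recipe with a single small 3D-convolutional autoencoder: train on normal activity clips and flag clips whose reconstruction error is large. The paper's two-channel thermal/optical-flow design, region tracking, and adversarial training are not reproduced, and all sizes here are illustrative.</p>
<pre><code>
# Sketch of reconstruction-based anomaly detection for video clips: an autoencoder
# trained only on normal activities should reconstruct a fall poorly, so a large
# reconstruction error flags a possible fall. Illustrative stand-in architecture.
import torch

class Conv3dAutoencoder(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.enc = torch.nn.Sequential(
            torch.nn.Conv3d(1, 8, kernel_size=3, stride=2, padding=1), torch.nn.ReLU(),
            torch.nn.Conv3d(8, 16, kernel_size=3, stride=2, padding=1), torch.nn.ReLU())
        self.dec = torch.nn.Sequential(
            torch.nn.ConvTranspose3d(16, 8, kernel_size=4, stride=2, padding=1), torch.nn.ReLU(),
            torch.nn.ConvTranspose3d(8, 1, kernel_size=4, stride=2, padding=1))

    def forward(self, clip):                      # clip: (batch, 1, frames, H, W)
        return self.dec(self.enc(clip))

def anomaly_score(model, clip):
    """Per-clip reconstruction error; higher values suggest a fall."""
    with torch.no_grad():
        recon = model(clip)
    return torch.mean((clip - recon) ** 2, dim=(1, 2, 3, 4))

model = Conv3dAutoencoder().eval()                # untrained weights, interface demo only
print(anomaly_score(model, torch.rand(2, 1, 16, 64, 64)))
</code></pre>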
arXiv:1909.06743 (https://arxiv.org/abs/1909.06743) [cs.CL, cs.LG]
Title: Learning Rhyming Constraints using Structured Adversaries
Authors: Harsh Jhamtani, Sanket Vaibhav Mehta, Jaime Carbonell, Taylor Berg-Kirkpatrick
Abstract: Existing recurrent neural language models often fail to capture higher-level structure present in text: for example, rhyming patterns present in poetry. Much prior work on poetry generation uses manually defined constraints which are satisfied during decoding using either specialized decoding procedures or rejection sampling. The rhyming constraints themselves are typically not learned by the generator. We propose an alternate approach that uses a structured discriminator to learn a poetry generator that directly captures rhyming constraints in a generative adversarial setup. By causing the discriminator to compare poems based only on a learned similarity matrix of pairs of line ending words, the proposed approach is able to successfully learn rhyming patterns in two different English poetry datasets (Sonnet and Limerick) without explicitly being provided with any phonetic information.
Submitted 15 September, 2019; originally announced September 2019.
Comments: EMNLP-IJCNLP 2019 Short Paper
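As a rough illustration of scoring rhyme structure with a learned similarity over line-ending word pairs (the paper's discriminator is more involved), the sketch below uses placeholder embeddings, a bilinear similarity, and an AABB scheme; `emb` and `W` are assumptions, not learned parameters from the paper.

```python
import numpy as np

# Toy sketch of scoring a poem's rhyme structure with a learned word-pair
# similarity (not the paper's discriminator): pairs of line-ending words
# that the scheme says should rhyme are scored with a bilinear form.

def rhyme_score(line_end_ids, scheme, emb, W):
    """line_end_ids: word ids of line endings; scheme: e.g. 'AABB';
    emb: |V| x d embedding matrix; W: d x d similarity parameters."""
    score = 0.0
    for i in range(len(scheme)):
        for j in range(i + 1, len(scheme)):
            if scheme[i] == scheme[j]:               # these two lines should rhyme
                u, v = emb[line_end_ids[i]], emb[line_end_ids[j]]
                score += u @ W @ v
    return score

# Example with random placeholder parameters:
rng = np.random.default_rng(0)
emb, W = rng.normal(size=(100, 8)), rng.normal(size=(8, 8))
print(rhyme_score([3, 7, 11, 19], "AABB", emb, W))
```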
arXiv:1904.08962 (https://arxiv.org/abs/1904.08962) [eess.SY, cs.LG]
Title: Constrained Restless Bandits for Dynamic Scheduling in Cyber-Physical Systems
Authors: Kesav Kaza, Rahul Meshram, Varun Mehta, S. N. Merchant
Abstract: This paper studies a class of constrained restless multi-armed bandits (CRMAB). The constraints are in the form of a time-varying set of actions (set of available arms). This variation can be either stochastic or semi-deterministic. Given a set of arms, a fixed number of them can be chosen to be played in each decision interval. The play of each arm yields a state-dependent reward. The current states of arms are partially observable through binary feedback signals from arms that are played. The current availability of arms is fully observable. The objective is to maximize long-term cumulative reward. The uncertainty about future availability of arms, along with partial state information, makes this objective challenging. Applications for CRMAB can be found in resource allocation in cyber-physical systems involving components with time-varying availability. First, this optimization problem is analyzed using Whittle's index policy. To this end, a constrained restless single-armed bandit is studied. It is shown to admit a threshold-type optimal policy and is also indexable. An algorithm to compute Whittle's index is presented. An alternate solution method with lower complexity is also presented in the form of an online rollout policy. A detailed discussion on the complexity of both these schemes is also presented, which suggests that the online rollout policy with a short look-ahead is simpler to implement than Whittle's index computation. Further, upper bounds on the value function are derived in order to estimate the degree of sub-optimality of various solutions. The simulation study compares the performance of Whittle's index, online rollout, myopic and modified Whittle's index policies.
Submitted 6 September, 2021; v1 submitted 18 April, 2019; originally announced April 2019.
Comments: 17 pages, 2 figures
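Of the policies compared in this abstract, the myopic baseline is the simplest to write down. The sketch below is only that baseline under an availability constraint (not the Whittle-index or rollout policies); two-state arms, the belief vector, and the reward values are assumptions.

```python
import numpy as np

# Toy myopic baseline for a constrained restless bandit (not the paper's
# Whittle-index or rollout policy): among the arms that are currently
# available, play the M arms with the highest expected immediate reward
# under the current belief.

def myopic_select(beliefs, rewards_good, rewards_bad, available, M):
    """beliefs[i] = P(arm i is in the 'good' state); available: boolean mask."""
    expected = beliefs * rewards_good + (1.0 - beliefs) * rewards_bad
    expected = np.where(available, expected, -np.inf)   # mask unavailable arms
    return np.argsort(expected)[-M:]                    # indices of chosen arms

beliefs   = np.array([0.2, 0.9, 0.5, 0.7])
available = np.array([True, True, False, True])
print(myopic_select(beliefs, np.ones(4), np.zeros(4), available, M=2))  # -> [3 1]
```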
arXiv:1810.09007 (https://arxiv.org/abs/1810.09007) [cs.DB, cs.DC]
Title: Spatial Co-location Pattern Mining - A new perspective using Graph Database
Authors: Sanket Vaibhav Mehta, Shagun Sodhani, Dhaval Patel
Abstract: Spatial co-location pattern mining refers to the task of discovering the group of objects or events that co-occur at many places. Extracting these patterns from spatial data is very difficult due to the complexity of spatial data types, spatial relationships, and spatial auto-correlation. These patterns have applications in domains including public safety, geo-marketing, crime prediction and ecology. Prior work focused on using the spatial join. While these approaches provide state-of-the-art results, they are very expensive to compute due to the multiway spatial join, and scaling them to real-world datasets is an open problem. We address these limitations by formulating the co-location pattern discovery as a clique enumeration problem over a neighborhood graph (which is materialized using a distributed graph database). We propose three new traversal based algorithms, namely $CliqueEnum_G$, $CliqueEnum_K$ and $CliqueExtend$. We provide the empirical evidence for the effectiveness of our proposed algorithms by evaluating them for a large real-life dataset. The three algorithms allow for a trade-off between time and memory requirements and support interactive data analysis without having to recompute all the intermediate results. These attributes make our algorithms applicable to a wide range of use cases for different data sizes.
Submitted 21 October, 2018; originally announced October 2018.
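The clique-enumeration formulation can be illustrated on a tiny in-memory example. The paper's algorithms traverse a distributed graph database, whereas the sketch below just uses networkx; the distance threshold and the toy objects are assumptions.

```python
import math
import itertools
import networkx as nx

# Tiny in-memory analogue of co-location mining as clique enumeration
# (the paper's traversal algorithms run over a distributed graph database):
# build a neighborhood graph with a distance threshold, then keep cliques
# whose members all carry distinct feature types.

def neighborhood_graph(objects, threshold):
    """objects: list of (feature_type, (x, y)) tuples."""
    g = nx.Graph()
    for i, (ftype, pos) in enumerate(objects):
        g.add_node(i, ftype=ftype, pos=pos)
    for i, j in itertools.combinations(range(len(objects)), 2):
        if math.dist(objects[i][1], objects[j][1]) <= threshold:
            g.add_edge(i, j)
    return g

def colocation_candidates(g, min_size=2):
    for clique in nx.enumerate_all_cliques(g):
        types = [g.nodes[n]["ftype"] for n in clique]
        if len(clique) >= min_size and len(set(types)) == len(types):
            yield tuple(sorted(types))

objs = [("A", (0, 0)), ("B", (1, 0)), ("C", (0.5, 0.5)), ("A", (5, 5))]
print(set(colocation_candidates(neighborhood_graph(objs, threshold=1.5))))
```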
arXiv:1808.09543 (https://arxiv.org/abs/1808.09543) [cs.CL]
Title: Towards Semi-Supervised Learning for Deep Semantic Role Labeling
Authors: Sanket Vaibhav Mehta, Jay Yoon Lee, Jaime Carbonell
Abstract: Neural models have shown several state-of-the-art performances on Semantic Role Labeling (SRL). However, the neural models require an immense amount of semantic-role corpora and are thus not well suited for low-resource languages or domains. The paper proposes a semi-supervised semantic role labeling method that outperforms the state-of-the-art in limited SRL training corpora. The method is based on explicitly enforcing syntactic constraints by augmenting the training objective with a syntactic-inconsistency loss component and uses SRL-unlabeled instances to train a joint-objective LSTM. On the CoNLL-2012 English section, the proposed semi-supervised training with 1% and 10% SRL-labeled data and varying amounts of SRL-unlabeled data achieves +1.58 and +0.78 F1, respectively, over pre-trained models that were trained on a SOTA architecture with ELMo on the same SRL-labeled data. Additionally, by using the syntactic-inconsistency loss at inference time, the proposed model achieves +3.67 and +2.1 F1 over the pre-trained model on 1% and 10% SRL-labeled data, respectively.
Submitted 28 August, 2018; originally announced August 2018.
Comments: EMNLP 2018
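A hedged sketch of the joint objective described above: a supervised SRL loss on labeled batches plus a weighted syntactic-inconsistency penalty on SRL-unlabeled batches. `srl_loss`, `syntactic_inconsistency`, the batch attributes, and the weight `alpha` are placeholders, and the paper's exact loss may differ.

```python
# Hedged sketch of semi-supervised training with a syntactic-inconsistency
# penalty (not the paper's exact objective). Assumes a PyTorch-style model
# and optimizer so that loss.backward() drives gradient updates.

def training_step(model, labeled_batch, unlabeled_batch, optimizer,
                  srl_loss, syntactic_inconsistency, alpha=0.1):
    """srl_loss and syntactic_inconsistency are assumed user-supplied:
    the former is an ordinary supervised loss, the latter scores how much
    predicted role spans disagree with the syntactic parse."""
    optimizer.zero_grad()
    loss = srl_loss(model(labeled_batch), labeled_batch.labels)
    loss = loss + alpha * syntactic_inconsistency(model(unlabeled_batch),
                                                  unlabeled_batch.parse)
    loss.backward()
    optimizer.step()
    return float(loss)
```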
arXiv:1806.09266 (https://arxiv.org/abs/1806.09266) [cs.RO, cs.CV, cs.LG, stat.ML]
Title: Learning Task-Oriented Grasping for Tool Manipulation from Simulated Self-Supervision
Authors: Kuan Fang, Yuke Zhu, Animesh Garg, Andrey Kurenkov, Viraj Mehta, Li Fei-Fei, Silvio Savarese
Abstract: Tool manipulation is vital for facilitating robots to complete challenging task goals. It requires reasoning about the desired effect of the task and thus properly grasping and manipulating the tool to achieve the task. Task-agnostic grasping optimizes for grasp robustness while ignoring crucial task-specific constraints. In this paper, we propose the Task-Oriented Grasping Network (TOG-Net) to jointly optimize both task-oriented grasping of a tool and the manipulation policy for that tool. The training process of the model is based on large-scale simulated self-supervision with procedurally generated tool objects. We perform both simulated and real-world experiments on two tool-based manipulation tasks: sweeping and hammering. Our model achieves overall 71.1% task success rate for sweeping and 80.0% task success rate for hammering. Supplementary material is available at: bit.ly/task-oriented-grasp
Submitted 24 June, 2018; originally announced June 2018.
Comments: RSS 2018
arXiv:1805.10030 (https://arxiv.org/abs/1805.10030) [cs.CV]
Title: DIF: Dataset of Perceived Intoxicated Faces for Drunk Person Identification
Authors: Vineet Mehta, Devendra Pratap Yadav, Sai Srinadhu Katta, Abhinav Dhall
Abstract: Traffic accidents cause over a million deaths every year, of which a large fraction is attributed to drunk driving. An automated intoxicated driver detection system in vehicles will be useful in reducing accidents and related financial costs. Existing solutions require special equipment such as electrocardiograms, infrared cameras or breathalyzers. In this work, we propose a new dataset called DIF (Dataset of perceived Intoxicated Faces) which contains audio-visual data of intoxicated and sober people obtained from online sources. To the best of our knowledge, this is the first work for automatic bimodal non-invasive intoxication detection. Convolutional Neural Networks (CNN) and Deep Neural Networks (DNN) are trained for computing the video and audio baselines, respectively. A 3D CNN is used to exploit the spatio-temporal changes in the video. A simple variation of the traditional 3D convolution block is proposed, based on inducing non-linearity between the spatial and temporal channels. Extensive experiments are performed to validate the approach and baselines.
Submitted 8 September, 2019; v1 submitted 25 May, 2018; originally announced May 2018.
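The abstract only states that a non-linearity is induced between the spatial and temporal channels; one plausible reading is a factorized 3D convolution with a ReLU in between, sketched below in PyTorch. The layer sizes are arbitrary and this is not necessarily the paper's exact block.

```python
import torch
import torch.nn as nn

# One plausible reading of "inducing non-linearity between the spatial and
# temporal channels" (not necessarily the paper's block): factor a 3D
# convolution into a spatial 1xkxk conv, a non-linearity, and a temporal
# kx1x1 conv.

class FactorizedConv3d(nn.Module):
    def __init__(self, in_ch, mid_ch, out_ch, k=3):
        super().__init__()
        self.spatial = nn.Conv3d(in_ch, mid_ch, kernel_size=(1, k, k),
                                 padding=(0, k // 2, k // 2))
        self.temporal = nn.Conv3d(mid_ch, out_ch, kernel_size=(k, 1, 1),
                                  padding=(k // 2, 0, 0))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):                  # x: (batch, ch, time, height, width)
        return self.relu(self.temporal(self.relu(self.spatial(x))))

clip = torch.randn(2, 3, 8, 112, 112)      # a small random video batch
print(FactorizedConv3d(3, 16, 32)(clip).shape)   # -> (2, 32, 8, 112, 112)
```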
arXiv:1801.01301 (https://arxiv.org/abs/1801.01301) [eess.SY, cs.IT]
Title: Sequential Decision Making with Limited Observation Capability: Application to Wireless Networks
Authors: Kesav Kaza, Rahul Meshram, Varun Mehta, S. N. Merchant
Abstract: This work studies a generalized class of restless multi-armed bandits with hidden states that allows cumulative feedback, as opposed to the conventional instantaneous feedback. We call them lazy restless bandits (LRB), as the events of decision-making are sparser than the events of state transition. Hence, the feedback after each decision event is the cumulative effect of the following state transition events. The states of arms are hidden from the decision-maker and rewards for actions are state dependent. The decision-maker needs to choose one arm in each decision interval, such that long-term cumulative reward is maximized. As the states are hidden, the decision-maker maintains and updates its belief about them. It is shown that LRBs admit an optimal policy which has a threshold structure in belief space. The Whittle-index policy for solving the LRB problem is analyzed, and the indexability of LRBs is shown. Further, closed-form index expressions are provided for two sets of special cases; for more general cases, an algorithm for index computation is provided. An extensive simulation study is presented; Whittle-index, modified Whittle-index and myopic policies are compared. Lagrangian relaxation of the problem provides an upper bound on the optimal value function; it is used to assess the degree of sub-optimality of various policies.
Submitted 29 January, 2019; v1 submitted 4 January, 2018; originally announced January 2018.
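The belief maintenance mentioned above can be illustrated for a single two-state arm: propagate the belief through the unobserved transitions between decision epochs, then apply a Bayes correction for the received feedback. The transition matrix and feedback likelihoods below are placeholders, not the paper's model.

```python
import numpy as np

# Toy two-state belief update for a hidden-Markov arm (a simplified stand-in
# for the lazy restless bandit setting): between two decision epochs the
# belief is propagated through several unobserved state transitions, then
# corrected by the feedback received for the played arm.

def propagate(belief_good, P, steps):
    """belief_good = P(state = good); P[i][j] = P(next = j | current = i)."""
    b = np.array([belief_good, 1.0 - belief_good])
    for _ in range(steps):
        b = b @ P
    return b[0]

def bayes_update(belief_good, feedback, p_fb_given_good, p_fb_given_bad):
    like_good = p_fb_given_good if feedback else 1.0 - p_fb_given_good
    like_bad  = p_fb_given_bad  if feedback else 1.0 - p_fb_given_bad
    num = belief_good * like_good
    return num / (num + (1.0 - belief_good) * like_bad)

P = np.array([[0.9, 0.1],            # placeholder transition matrix
              [0.3, 0.7]])
b = propagate(0.5, P, steps=4)       # several transitions between decisions
print(bayes_update(b, feedback=True, p_fb_given_good=0.8, p_fb_given_bad=0.2))
```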
arXiv:1708.04672 (https://arxiv.org/abs/1708.04672) [cs.CV, cs.GR]
Title: DeformNet: Free-Form Deformation Network for 3D Shape Reconstruction from a Single Image
Authors: Andrey Kurenkov, Jingwei Ji, Animesh Garg, Viraj Mehta, JunYoung Gwak, Christopher Choy, Silvio Savarese
Abstract: 3D reconstruction from a single image is a key problem in multiple applications ranging from robotic manipulation to augmented reality. Prior methods have tackled this problem through generative models which predict 3D reconstructions as voxels or point clouds. However, these methods can be computationally expensive and miss fine details. We introduce a new differentiable layer for 3D data deformation and use it in DeformNet to learn a model for 3D reconstruction-through-deformation. DeformNet takes an image input, searches the nearest shape template from a database, and deforms the template to match the query image. We evaluate our approach on the ShapeNet dataset and show that: (a) the Free-Form Deformation layer is a powerful new building block for Deep Learning models that manipulate 3D data; (b) DeformNet uses this FFD layer combined with shape retrieval for smooth and detail-preserving 3D reconstruction of qualitatively plausible point clouds with respect to a single query image; (c) compared to other state-of-the-art 3D reconstruction methods, DeformNet quantitatively matches or outperforms their benchmarks by significant margins. For more information, visit: https://deformnet-site.github.io/DeformNet-website/
Submitted 10 August, 2017; originally announced August 2017.
Comments: 11 pages, 9 figures, NIPS
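The transformation underlying a free-form deformation layer is the classic trilinear Bernstein FFD; the NumPy sketch below shows only that transformation, applied to points already normalised to the unit cube, not the paper's differentiable network layer. The 3x3x3 lattice and the sample point are assumptions.

```python
import numpy as np
from math import comb

# Classic trilinear (Bernstein) free-form deformation: points are expressed
# in the local coordinates of a control lattice and re-synthesised from the
# (possibly displaced) control points. Plain NumPy, not the paper's layer.

def bernstein(n, i, t):
    return comb(n, i) * (t ** i) * ((1.0 - t) ** (n - i))

def ffd(points, control_points):
    """points: (N, 3) array with coordinates normalised to [0, 1]^3;
    control_points: (l, m, n, 3) lattice."""
    l, m, n, _ = control_points.shape
    out = np.zeros_like(points)
    for p_idx, (s, t, u) in enumerate(points):
        for i in range(l):
            for j in range(m):
                for k in range(n):
                    w = (bernstein(l - 1, i, s) *
                         bernstein(m - 1, j, t) *
                         bernstein(n - 1, k, u))
                    out[p_idx] += w * control_points[i, j, k]
    return out

# Identity lattice: a 3x3x3 grid of control points spanning the unit cube.
grid = np.stack(np.meshgrid(*([np.linspace(0, 1, 3)] * 3), indexing="ij"), axis=-1)
pts = np.array([[0.25, 0.5, 0.75]])
print(ffd(pts, grid))            # ~ the original point (identity deformation)
```

Displacing entries of `grid` deforms every point smoothly, which is the property a learned FFD layer exploits.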
arXiv:1707.08608 (https://arxiv.org/abs/1707.08608) [cs.CL]
Title: Gradient-based Inference for Networks with Output Constraints
Authors: Jay Yoon Lee, Sanket Vaibhav Mehta, Michael Wick, Jean-Baptiste Tristan, Jaime Carbonell
Abstract: Practitioners apply neural networks to increasingly complex problems in natural language processing, such as syntactic parsing and semantic role labeling that have rich output structures. Many such structured-prediction problems require deterministic constraints on the output values; for example, in sequence-to-sequence syntactic parsing, we require that the sequential outputs encode valid trees. While hidden units might capture such properties, the network is not always able to learn such constraints from the training data alone, and practitioners must then resort to post-processing. In this paper, we present an inference method for neural networks that enforces deterministic constraints on outputs without performing rule-based post-processing or expensive discrete search. Instead, in the spirit of gradient-based training, we enforce constraints with gradient-based inference (GBI): for each input at test-time, we nudge continuous model weights until the network's unconstrained inference procedure generates an output that satisfies the constraints. We study the efficacy of GBI on three tasks with hard constraints: semantic role labeling, syntactic parsing, and sequence transduction. In each case, the algorithm not only satisfies constraints but improves accuracy, even when the underlying network is state-of-the-art.
Submitted 22 April, 2019; v1 submitted 26 July, 2017; originally announced July 2017.
Comments: AAAI 2019
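The GBI idea as stated, nudging the weights for a single test input until the constraints hold, can be sketched directly. `constraint_loss` is assumed to be a user-supplied differentiable violation measure, and the step count, learning rate and stopping rule below are assumptions rather than the paper's settings.

```python
import copy
import torch

# Sketch of gradient-based inference: for one test input, nudge a copy of
# the trained weights until a differentiable measure of constraint violation
# drops (the paper's exact procedure may differ).

def gradient_based_inference(model, x, constraint_loss,
                             steps=20, lr=1e-3, tol=1e-4):
    m = copy.deepcopy(model)                 # keep the trained model intact
    opt = torch.optim.SGD(m.parameters(), lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        y = m(x)                             # unconstrained forward pass
        loss = constraint_loss(y)            # 0 when all constraints hold
        if loss.item() < tol:
            break
        loss.backward()
        opt.step()
    return m(x)                              # output of the nudged network
```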
arXiv:1703.06570 (https://arxiv.org/abs/1703.06570) [cs.LO, cs.NI, cs.SE]; doi: 10.4204/EPTCS.244.3
Title: Modelling, Verification, and Comparative Performance Analysis of the B.A.T.M.A.N. Protocol
Authors: Kaylash Chaudhary, Ansgar Fehnker, Vinay Mehta
Abstract: This paper considers a network routing protocol known as Better Approach to Mobile Ad hoc Networks (B.A.T.M.A.N.). The protocol serves two aims: first, to discover all bidirectional links, and second, to identify the best next hop for every other node in the network. A key element is that each node floods the network at regular intervals with so-called originator messages. This paper describes in detail a formalisation of the B.A.T.M.A.N. protocol. This exercise revealed several ambiguities and inconsistencies in the RFC. We developed two models. The first implements, if possible, a literal reading of the RFC, while the second model tries to be closer to the underlying concepts. The alternative model is in some places less restrictive: it rebroadcasts more often when this helps route discovery, and on the other hand drops more messages that might interfere with the process. We verify for a basic untimed model that both interpretations ensure loop-freedom, bidirectional link discovery, and route discovery. We use simulation of a timed model to compare the performance and found that both models are comparable when it comes to the time and number of messages needed for discovering routes. However, the alternative model identifies a significantly lower number of suboptimal routes, and thus improves on the literal interpretation of the RFC.
Submitted 19 March, 2017; originally announced March 2017.
Comments: In Proceedings MARS 2017, arXiv:1703.05812
ACM Class: C.2.2; D.2.4
Journal ref: EPTCS 244, 2017, pp. 53-65
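For orientation, the best-next-hop idea in B.A.T.M.A.N. can be caricatured as counting recent originator messages (OGMs) per neighbour. The toy class below ignores most of the RFC's rules, and everything the paper formalises, so it is only a rough illustration; the window size and node names are assumptions.

```python
from collections import defaultdict, deque

# Toy illustration of best-next-hop selection (heavily simplified): for each
# originator, count how many of its recent OGMs arrived via each neighbour
# inside a sliding sequence-number window, and route via the neighbour with
# the highest count.

class OgmTable:
    def __init__(self, window=64):
        self.window = window
        self.seen = defaultdict(deque)       # (originator, neighbour) -> seq nos

    def receive_ogm(self, originator, neighbour, seqno):
        q = self.seen[(originator, neighbour)]
        q.append(seqno)
        while q and q[0] <= seqno - self.window:   # keep only the recent window
            q.popleft()

    def best_next_hop(self, originator):
        counts = {nb: len(q) for (orig, nb), q in self.seen.items()
                  if orig == originator}
        return max(counts, key=counts.get) if counts else None

table = OgmTable()
for s in range(10):
    table.receive_ogm("node-D", "nb-1", s)
for s in range(0, 10, 2):
    table.receive_ogm("node-D", "nb-2", s)
print(table.best_next_hop("node-D"))         # -> 'nb-1'
```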
arXiv:1312.2526 (https://arxiv.org/abs/1312.2526) [cs.RO]
Title: Connectivity maintenance by robotic Mobile Ad-hoc NETwork
Authors: Vaibhav Kumar Mehta, Filippo Arrichiello
Abstract: The problem of maintaining a wireless communication link between a fixed base station and an autonomous agent by means of a team of mobile robots is addressed in this work. Such a problem can be of interest for search and rescue missions in post-disaster scenarios, where the autonomous agent can be used for remote monitoring and first-hand knowledge of the aftermath, while the mobile robots can be used to give the agent the possibility to dynamically send its collected information to an external base station. To study the problem, a distributed multi-robot system with WiFi communication capabilities has been developed and used to implement a Mobile Ad-hoc NETwork (MANET) to guarantee the required multi-hop communication. None of the robots of the team possesses knowledge of the agent's movement, nor do they hold a pre-assigned position in the ad-hoc network; instead, they adapt to the dynamic environmental situation. This adaptation only requires the robots to know their own position and to exchange such information with their one-hop neighbours. The robots' motion is achieved by implementing a behavioural control, namely the Null-Space based Behavioural control, embedding the collective mission to achieve the required self-configuration. Validation of the approach is performed by means of demanding experimental tests involving five ground mobile robots capable of self-localization and dynamic obstacle avoidance.
Submitted 9 December, 2013; originally announced December 2013.
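The Null-Space based Behavioural control named above composes task velocities by projecting the secondary task into the null space of the primary task's Jacobian. The generic NumPy sketch below shows that composition with placeholder numbers, not the paper's specific behaviours.

```python
import numpy as np

# Generic Null-Space-based Behavioural (NSB) composition: the secondary task
# velocity is projected into the null space of the primary task Jacobian so
# it cannot disturb the primary behaviour.

def nsb_velocity(J1, v1_task, v2_vel):
    """J1: primary task Jacobian; v1_task: desired primary task-space rate;
    v2_vel: velocity requested by the secondary behaviour."""
    J1_pinv = np.linalg.pinv(J1)
    primary = J1_pinv @ v1_task
    null_proj = np.eye(J1.shape[1]) - J1_pinv @ J1
    return primary + null_proj @ v2_vel

J1 = np.array([[1.0, 0.0]])        # primary task: control the x coordinate
v1 = np.array([0.2])               # desired x velocity
v2 = np.array([0.5, 0.3])          # secondary behaviour wants x and y motion
print(nsb_velocity(J1, v1, v2))    # -> [0.2, 0.3]: y passes, x is overridden
```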