
Search | arXiv e-print repository

Showing 1–42 of 42 results for author: Chauhan, J

Searching in archive cs.

1. arXiv:2410.12367 [pdf, ps, other]
   Categories: math.ST (Statistics Theory); cs.LG (Machine Learning); stat.ME (Methodology)
   Title: Adaptive and Stratified Subsampling Techniques for High Dimensional Non-Standard Data Environments
   Authors: Prateek Mittal, Jai Dalmotra, Joohi Chauhan
   Abstract: This paper addresses the challenge of estimating high-dimensional parameters in non-standard data environments, where traditional methods often falter due to issues such as heavy-tailed distributions, data contamination, and dependent observations. We propose robust subsampling techniques, specifically Adaptive Importance Sampling (AIS) and Stratified Subsampling, designed to enhance the reliability and efficiency of parameter estimation. Under some clearly outlined conditions, we establish consistency and asymptotic normality for the proposed estimators, providing non-asymptotic error bounds that quantify their performance. Our theoretical foundations are complemented by controlled experiments demonstrating the superiority of our methods over conventional approaches. By bridging the gap between theory and practice, this work offers significant contributions to robust statistical estimation, paving the way for advancements in various applied domains.
   Submitted 16 October, 2024; originally announced October 2024.
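
   The listing carries no code, so purely as an illustration of the two schemes the abstract names, here is a minimal NumPy sketch of importance-weighted and stratified subsampling. The row-norm weighting rule and the per-stratum quota are assumptions for illustration, not the paper's AIS estimator.

   ```python
   import numpy as np

   def importance_subsample(X, m, seed=0):
       """Draw m rows with probability proportional to row norm (a common
       importance heuristic; the paper's adaptive weights are more refined)."""
       rng = np.random.default_rng(seed)
       norms = np.linalg.norm(X, axis=1)
       p = norms / norms.sum()
       idx = rng.choice(len(X), size=m, replace=False, p=p)
       return X[idx], 1.0 / (len(X) * p[idx])   # rows + inverse-probability weights

   def stratified_subsample(X, strata, m_per_stratum, seed=0):
       """Sample uniformly within each stratum so rare strata stay represented."""
       rng = np.random.default_rng(seed)
       keep = [rng.choice(np.flatnonzero(strata == s),
                          size=min(m_per_stratum, int(np.sum(strata == s))),
                          replace=False)
               for s in np.unique(strata)]
       return X[np.concatenate(keep)]
   ```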

2. arXiv:2406.16148 [pdf, other]
   Categories: cs.SD (Sound); cs.AI (Artificial Intelligence); cs.LG (Machine Learning); eess.AS (Audio and Speech Processing)
   Title: Towards Open Respiratory Acoustic Foundation Models: Pretraining and Benchmarking
   Authors: Yuwei Zhang, Tong Xia, Jing Han, Yu Wu, Georgios Rizos, Yang Liu, Mohammed Mosuily, Jagmohan Chauhan, Cecilia Mascolo
   Abstract: Respiratory audio, such as coughing and breathing sounds, has predictive power for a wide range of healthcare applications, yet is currently under-explored. The main problem for those applications arises from the difficulty in collecting large labeled task-specific data for model development. Generalizable respiratory acoustic foundation models pretrained with unlabeled data would offer appealing advantages and possibly unlock this impasse. However, given the safety-critical nature of healthcare applications, it is pivotal to also ensure openness and replicability for any proposed foundation model solution. To this end, we introduce OPERA, an OPEn Respiratory Acoustic foundation model pretraining and benchmarking system, as the first approach answering this need. We curate large-scale respiratory audio datasets (~136K samples, over 400 hours), pretrain three pioneering foundation models, and build a benchmark consisting of 19 downstream respiratory health tasks for evaluation. Our pretrained models demonstrate superior performance (against existing acoustic models pretrained with general audio on 16 out of 19 tasks) and generalizability (to unseen datasets and new respiratory audio modalities). This highlights the great promise of respiratory acoustic foundation models and encourages more studies using OPERA as an open resource to accelerate research on respiratory audio for health. The system is accessible from https://github.com/evelyn0414/OPERA.
   Submitted 7 November, 2024; v1 submitted 23 June, 2024; originally announced June 2024.
   Comments: accepted by NeurIPS 2024 Track Datasets and Benchmarks

3. arXiv:2312.17004 [pdf, other]
   Categories: eess.IV (Image and Video Processing); cs.CV (Computer Vision and Pattern Recognition)
   Title: Continual Learning in Medical Image Analysis: A Comprehensive Review of Recent Advancements and Future Prospects
   Authors: Pratibha Kumari, Joohi Chauhan, Afshin Bozorgpour, Boqiang Huang, Reza Azad, Dorit Merhof
   Abstract: Medical imaging analysis has witnessed remarkable advancements, even surpassing human-level performance in recent years, driven by the rapid development of advanced deep-learning algorithms. However, when the inference dataset slightly differs from what the model has seen during one-time training, model performance is greatly compromised. The situation requires restarting the training process using both the old and the new data, which is computationally costly, does not align with the human learning process, and imposes storage constraints and privacy concerns. Alternatively, continual learning has emerged as a crucial approach for developing unified and sustainable deep models to deal with new classes, tasks, and the drifting nature of data in non-stationary environments for various application areas. Continual learning techniques enable models to adapt and accumulate knowledge over time, which is essential for maintaining performance on evolving datasets and novel tasks. This systematic review paper provides a comprehensive overview of the state-of-the-art in continual learning techniques applied to medical imaging analysis. We present an extensive survey of existing research, covering topics including catastrophic forgetting, data drifts, stability, and plasticity requirements. Further, an in-depth discussion of key components of a continual learning framework, such as continual learning scenarios, techniques, evaluation schemes, and metrics, is provided. Continual learning techniques encompass various categories, including rehearsal, regularization, architectural, and hybrid strategies. We assess the popularity and applicability of continual learning categories in various medical sub-fields like radiology and histopathology...
   Submitted 10 October, 2024; v1 submitted 28 December, 2023; originally announced December 2023.

4. arXiv:2311.11420 [pdf, other]
   Categories: cs.LG (Machine Learning); cs.AI (Artificial Intelligence); cs.CV (Computer Vision and Pattern Recognition)
   Title: LifeLearner: Hardware-Aware Meta Continual Learning System for Embedded Computing Platforms
   Authors: Young D. Kwon, Jagmohan Chauhan, Hong Jia, Stylianos I. Venieris, Cecilia Mascolo
   Abstract: Continual Learning (CL) allows applications such as user personalization and household robots to learn on the fly and adapt to context. This is an important feature when context, actions, and users change. However, enabling CL on resource-constrained embedded systems is challenging due to the limited labeled data, memory, and computing capacity. In this paper, we propose LifeLearner, a hardware-aware meta continual learning system that drastically optimizes system resources (lower memory, latency, energy consumption) while ensuring high accuracy. Specifically, we (1) exploit meta-learning and rehearsal strategies to explicitly cope with data scarcity issues and ensure high accuracy, (2) effectively combine lossless and lossy compression to significantly reduce the resource requirements of CL and rehearsal samples, and (3) develop a hardware-aware system on embedded and IoT platforms considering the hardware characteristics. As a result, LifeLearner achieves near-optimal CL performance, falling short by only 2.8% in accuracy compared to an Oracle baseline. With respect to the state-of-the-art (SOTA) Meta CL method, LifeLearner drastically reduces the memory footprint (by 178.7x), end-to-end latency by 80.8-94.2%, and energy consumption by 80.9-94.2%. In addition, we successfully deployed LifeLearner on two edge devices and a microcontroller unit, thereby enabling efficient CL on resource-constrained platforms where it would be impractical to run SOTA methods, and the far-reaching deployment of adaptable CL in a ubiquitous manner. Code is available at https://github.com/theyoungkwon/LifeLearner.
   Submitted 19 November, 2023; originally announced November 2023.
   Comments: Accepted for publication at SenSys 2023
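
   For readers unfamiliar with the rehearsal strategy the abstract refers to, a minimal sketch follows: a bounded buffer of past examples is replayed alongside new data so old tasks are not forgotten. This is a generic reservoir-sampling illustration, not LifeLearner's compressed latent rehearsal; all names are made up.

   ```python
   import random

   class RehearsalBuffer:
       """Fixed-size reservoir of past (x, y) pairs for replay during CL."""
       def __init__(self, capacity):
           self.capacity, self.seen, self.data = capacity, 0, []

       def add(self, x, y):
           self.seen += 1
           if len(self.data) < self.capacity:
               self.data.append((x, y))
           else:                                # reservoir sampling keeps a
               j = random.randrange(self.seen)  # uniform sample of the stream
               if j < self.capacity:
                   self.data[j] = (x, y)

       def replay(self, k):
           return random.sample(self.data, min(k, len(self.data)))

   # training step: mix the new batch with replayed old samples, e.g.
   # batch = new_samples + buffer.replay(k=len(new_samples))
   ```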

5. arXiv:2310.11707 [pdf, other]
   Categories: cs.LG (Machine Learning)
   Title: Learning under Label Proportions for Text Classification
   Authors: Jatin Chauhan, Xiaoxuan Wang, Wei Wang
   Abstract: We present one of the preliminary NLP works under the challenging setup of Learning from Label Proportions (LLP), where the data is provided in an aggregate form called bags, and only the proportion of samples in each class is available as the ground truth. This setup is in line with the desired characteristics of training models under privacy settings and weak supervision. By characterizing some irregularities of the most widely used baseline technique, DLLP, we propose a novel formulation that is also robust. This is accompanied by a learnability result that provides a generalization bound under LLP. Combining this formulation with a self-supervised objective, our method achieves better results compared to the baselines in almost 87% of the experimental configurations, which include large-scale models for both long- and short-range texts, across multiple metrics.
   Submitted 18 October, 2023; originally announced October 2023.
   Comments: accepted as long paper in Findings of EMNLP 2023
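
   The DLLP baseline named in the abstract trains from bag-level supervision by matching the average of the model's per-instance predictions to each bag's known class proportions. A PyTorch sketch of that standard loss (the usual formulation, not the paper's improved objective):

   ```python
   import torch

   def dllp_loss(logits, bag_proportions, eps=1e-8):
       """Cross-entropy between a bag's true class proportions and the mean
       predicted class distribution over its instances.
       logits: (n_instances, n_classes) for one bag
       bag_proportions: (n_classes,) summing to 1
       """
       p_hat = torch.softmax(logits, dim=-1).mean(dim=0)  # bag-level prediction
       return -(bag_proportions * torch.log(p_hat + eps)).sum()
   ```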

6. arXiv:2310.07535 [pdf, other]
   Categories: cs.LG (Machine Learning); cs.AI (Artificial Intelligence)
   Title: Fairness under Covariate Shift: Improving Fairness-Accuracy tradeoff with few Unlabeled Test Samples
   Authors: Shreyas Havaldar, Jatin Chauhan, Karthikeyan Shanmugam, Jay Nandy, Aravindan Raghuveer
   Abstract: Covariate shift in the test data is a common practical phenomenon that can significantly degrade both the accuracy and the fairness performance of a model. Ensuring fairness across different sensitive groups under covariate shift is of paramount importance due to societal implications like criminal justice. We operate in the unsupervised regime where only a small set of unlabeled test samples along with a labeled training set is available. Towards improving fairness under this highly challenging yet realistic scenario, we make three contributions. First is a novel composite weighted entropy based objective for prediction accuracy which is optimized along with a representation matching loss for fairness. We experimentally verify that optimizing with our loss formulation outperforms a number of state-of-the-art baselines in the Pareto sense with respect to the fairness-accuracy tradeoff on several standard datasets. Our second contribution is a new setting we term Asymmetric Covariate Shift that, to the best of our knowledge, has not been studied before. Asymmetric covariate shift occurs when the distribution of covariates of one group shifts significantly compared to the other groups, as happens when a dominant group is over-represented. While this setting is extremely challenging for current baselines, we show that our proposed method significantly outperforms them. Our third contribution is theoretical, where we show that our weighted entropy term along with prediction loss on the training set approximates test loss under covariate shift. Empirically and through formal sample complexity bounds, we show that this approximation to the unseen test loss does not depend on importance sampling variance, which affects many other baselines.
   Submitted 8 January, 2024; v1 submitted 11 October, 2023; originally announced October 2023.
   Comments: Accepted at The 38th Annual AAAI Conference on Artificial Intelligence (AAAI 2024)
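
   As a rough illustration of an entropy-based objective on unlabeled test samples (the paper's composite weighting and representation-matching loss are more involved), a PyTorch sketch:

   ```python
   import torch

   def weighted_entropy(logits, weights):
       """Weighted mean of per-sample prediction entropies on unlabeled data.
       logits: (n, n_classes) model outputs on unlabeled test samples
       weights: (n,) nonnegative per-sample weights; the paper derives a
       composite weighting, while uniform weights reduce this to plain
       entropy minimization."""
       log_p = torch.log_softmax(logits, dim=-1)
       ent = -(log_p.exp() * log_p).sum(dim=-1)   # H(p_i) per sample
       return (weights * ent).sum() / weights.sum()

   # total = task_loss(train_logits, train_y) + lam * weighted_entropy(test_logits, w)
   ```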

7. arXiv:2310.00115 [pdf, other]
   Categories: cs.LG (Machine Learning)
   Title: Learning Over Molecular Conformer Ensembles: Datasets and Benchmarks
   Authors: Yanqiao Zhu, Jeehyun Hwang, Keir Adams, Zhen Liu, Bozhao Nan, Brock Stenfors, Yuanqi Du, Jatin Chauhan, Olaf Wiest, Olexandr Isayev, Connor W. Coley, Yizhou Sun, Wei Wang
   Abstract: Molecular Representation Learning (MRL) has proven impactful in numerous biochemical applications such as drug discovery and enzyme design. While Graph Neural Networks (GNNs) are effective at learning molecular representations from a 2D molecular graph or a single 3D structure, existing works often overlook the flexible nature of molecules, which continuously interconvert across conformations via chemical bond rotations and minor vibrational perturbations. To better account for molecular flexibility, some recent works formulate MRL as an ensemble learning problem, focusing on explicitly learning from a set of conformer structures. However, most of these studies have limited datasets, tasks, and models. In this work, we introduce the first MoleculAR Conformer Ensemble Learning (MARCEL) benchmark to thoroughly evaluate the potential of learning on conformer ensembles and suggest promising research directions. MARCEL includes four datasets covering diverse molecule- and reaction-level properties of chemically diverse molecules, including organocatalysts and transition-metal catalysts, extending beyond the scope of common GNN benchmarks that are confined to drug-like molecules. In addition, we conduct a comprehensive empirical study, which benchmarks representative 1D, 2D, and 3D molecular representation learning models, along with two strategies that explicitly incorporate conformer ensembles into 3D MRL models. Our findings reveal that direct learning from an accessible conformer space can improve performance on a variety of tasks and models.
   Submitted 28 July, 2024; v1 submitted 29 September, 2023; originally announced October 2023.
   Comments: ICLR 2024
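
   One common way to incorporate a conformer ensemble into a 3D model, in the spirit the abstract describes, is to encode each conformer separately and pool the embeddings. The sketch below shows permutation-invariant mean pooling; this is a generic strategy for illustration, not necessarily either of the two evaluated in MARCEL.

   ```python
   import torch
   import torch.nn as nn

   class ConformerEnsembleEncoder(nn.Module):
       """Encode each 3D conformer independently, then mean-pool so the
       molecule embedding is invariant to conformer order and count."""
       def __init__(self, conformer_encoder: nn.Module, hidden: int, out: int):
           super().__init__()
           self.encoder = conformer_encoder   # any 3D encoder: conformer -> (hidden,)
           self.head = nn.Linear(hidden, out)

       def forward(self, conformers):         # list of per-conformer inputs
           embs = torch.stack([self.encoder(c) for c in conformers])  # (k, hidden)
           return self.head(embs.mean(dim=0)) # pooled molecule-level prediction
   ```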

8. arXiv:2308.02562
   Categories: cs.CV (Computer Vision and Pattern Recognition); cs.AI (Artificial Intelligence); cs.CY (Computers and Society); cs.LG (Machine Learning)
   Title: Food Classification using Joint Representation of Visual and Textual Data
   Authors: Prateek Mittal, Puneet Goyal, Joohi Chauhan
   Abstract: Food classification is an important task in health care. In this work, we propose a multimodal classification framework that uses a modified version of EfficientNet with the Mish activation function for image classification, while a traditional BERT transformer-based network is used for text classification. The proposed network and the other state-of-the-art methods are evaluated on a large open-source dataset, UPMC Food-101. The experimental results show that the proposed network outperforms the other methods; a significant difference of 11.57% and 6.34% in accuracy is observed for image and text classification, respectively, when compared with the second-best performing method. We also compared the performance in terms of accuracy, precision, and recall for text classification using both machine learning and deep learning-based models. The comparative analysis of the prediction results for both images and text demonstrated the efficiency and robustness of the proposed approach.
   Submitted 30 August, 2023; v1 submitted 3 August, 2023; originally announced August 2023.
   Comments: Updated results and discussions to be posted and some sections needed to be expanded
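
   The Mish activation mentioned here has a simple closed form, x * tanh(softplus(x)); a minimal PyTorch definition (recent PyTorch versions also ship it as torch.nn.Mish):

   ```python
   import torch
   import torch.nn.functional as F

   def mish(x: torch.Tensor) -> torch.Tensor:
       """Mish activation: a smooth, non-monotonic alternative to ReLU."""
       return x * torch.tanh(F.softplus(x))   # softplus(x) = log(1 + exp(x))
   ```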

9. arXiv:2307.09988 [pdf, other]
   Categories: cs.LG (Machine Learning); cs.CV (Computer Vision and Pattern Recognition)
   Title: TinyTrain: Resource-Aware Task-Adaptive Sparse Training of DNNs at the Data-Scarce Edge
   Authors: Young D. Kwon, Rui Li, Stylianos I. Venieris, Jagmohan Chauhan, Nicholas D. Lane, Cecilia Mascolo
   Abstract: On-device training is essential for user personalisation and privacy. With the pervasiveness of IoT devices and microcontroller units (MCUs), this task becomes more challenging due to the constrained memory and compute resources, and the limited availability of labelled user data. Nonetheless, prior works neglect the data scarcity issue, require excessively long training time (e.g. a few hours), or induce substantial accuracy loss (>10%). In this paper, we propose TinyTrain, an on-device training approach that drastically reduces training time by selectively updating parts of the model and explicitly coping with data scarcity. TinyTrain introduces a task-adaptive sparse-update method that dynamically selects the layer/channel to update based on a multi-objective criterion that jointly captures user data, the memory, and the compute capabilities of the target device, leading to high accuracy on unseen tasks with reduced computation and memory footprint. TinyTrain outperforms vanilla fine-tuning of the entire network by 3.6-5.0% in accuracy, while reducing the backward-pass memory and computation cost by up to 1,098x and 7.68x, respectively. Targeting broadly used real-world edge devices, TinyTrain achieves 9.5x faster and 3.5x more energy-efficient training over status-quo approaches, and 2.23x smaller memory footprint than SOTA methods, while remaining within the 1 MB memory envelope of MCU-grade platforms.
   Submitted 10 June, 2024; v1 submitted 19 July, 2023; originally announced July 2023.
   Comments: Accepted by ICML 2024
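
   Mechanically, a sparse update of the kind the abstract describes comes down to freezing everything and re-enabling gradients only for the layers a selection criterion picks. A PyTorch sketch; the layer choice here is a hypothetical placeholder, not TinyTrain's multi-objective criterion:

   ```python
   import torch.nn as nn

   def apply_sparse_update(model: nn.Module, selected: set):
       """Freeze all parameters, then unfreeze only the selected layers so the
       backward pass touches a small slice of the network."""
       for name, param in model.named_parameters():
           param.requires_grad = any(name.startswith(s) for s in selected)

   # e.g. only the last block and the classifier head get updated on-device:
   # apply_sparse_update(model, {"layer4", "fc"})
   # optimizer = torch.optim.SGD((p for p in model.parameters()
   #                              if p.requires_grad), lr=1e-3)
   ```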

10. arXiv:2305.18787 [pdf, other]
    Categories: cs.LG (Machine Learning); cs.CL (Computation and Language)
    Title: Universality and Limitations of Prompt Tuning
    Authors: Yihan Wang, Jatin Chauhan, Wei Wang, Cho-Jui Hsieh
    Abstract: Despite the demonstrated empirical efficacy of prompt tuning to adapt a pretrained language model for a new task, the theoretical underpinnings of the difference between "tuning parameters before the input" and "tuning model weights" are limited. We thus take one of the first steps to understand the role of soft-prompt tuning for transformer-based architectures. By considering a general-purpose architecture, we analyze prompt tuning from the lens of both universal approximation and limitations with finite-depth fixed-weight pretrained transformers for continuous-valued functions. Our universality result guarantees the existence of a strong transformer with a prompt to approximate any sequence-to-sequence function in the set of Lipschitz functions. The limitations of prompt tuning for limited-depth transformers are first proved by constructing a set of datasets that cannot be memorized by a prompt of any length for a given single encoder layer. We also provide a lower bound on the required number of tunable prompt parameters and compare the result with the number of parameters required for a low-rank update (based on LoRA) for a single-layer setting. We finally extend our analysis to multi-layer settings by providing sufficient conditions under which the transformer can at best learn datasets from invertible functions only. Our theoretical claims are also corroborated by empirical results.
    Submitted 16 November, 2023; v1 submitted 30 May, 2023; originally announced May 2023.
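
    "Tuning parameters before the input" means learning a few embedding vectors that are prepended to every input sequence while the transformer's weights stay frozen. A minimal PyTorch sketch of that generic mechanism (not the specific constructions analyzed in the paper):

    ```python
    import torch
    import torch.nn as nn

    class SoftPrompt(nn.Module):
        """Learnable prompt embeddings prepended to frozen-model inputs."""
        def __init__(self, prompt_len: int, d_model: int):
            super().__init__()
            self.prompt = nn.Parameter(torch.randn(prompt_len, d_model) * 0.02)

        def forward(self, token_embs: torch.Tensor) -> torch.Tensor:
            # token_embs: (batch, seq, d_model) from the frozen embedding layer
            batch = token_embs.size(0)
            prefix = self.prompt.unsqueeze(0).expand(batch, -1, -1)
            return torch.cat([prefix, token_embs], dim=1)  # (batch, p+seq, d)

    # only SoftPrompt.parameters() go to the optimizer; the transformer is frozen
    ```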

11. arXiv:2301.12209 [pdf, other]
    Categories: cs.SD (Sound); eess.AS (Audio and Speech Processing)
    Title: Who is snoring? Snore based user recognition
    Authors: Shenghao Li, Jagmohan Chauhan
    Abstract: Snoring is one of the most prominent symptoms of Obstructive Sleep Apnea-Hypopnea Syndrome (OSAH), a highly prevalent disease that causes repetitive collapse and cessation of the upper airway. Thus, accurate snore sound monitoring and analysis is crucial. However, the traditional monitoring method, polysomnography (PSG), requires patients to stay at a sleep clinic for the whole night and be connected to many pieces of equipment. An alternative and less invasive way is passive monitoring using a smartphone at home or in clinical settings. But there is a challenge: the environment may be shared by several people, so the raw audio may contain the snore activities of the bed partner or another person. Falsely capturing snoring activity could lead to critical false alarms and misdiagnosis of patients. To address this limitation, we propose the hypothesis that snore sound contains unique identity information which can be used for user recognition. We analyzed various machine learning models: Gaussian Mixture Model (GMM), GMM-UBM (Universal Background Model), and a Deep Neural Network (DNN) on MPSSC, an open-source snoring dataset, to evaluate the validity of our hypothesis. Our results are promising, as we achieved around 90% accuracy in identification and verification tasks. This work marks the first step towards understanding the practicality of snore-based user monitoring to enable multiple healthcare applications.
    Submitted 28 January, 2023; originally announced January 2023.
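
    A GMM-based recognizer of the kind evaluated here fits one mixture model per enrolled user over acoustic features (e.g. MFCC frames) and identifies a clip by the highest log-likelihood. A scikit-learn sketch, with feature extraction and the UBM variant omitted; hyperparameters are illustrative guesses:

    ```python
    from sklearn.mixture import GaussianMixture

    def enroll(users_features, n_components=8):
        """Fit one GMM per user. users_features: {user: (n_frames, n_mfcc)}."""
        return {u: GaussianMixture(n_components, covariance_type="diag",
                                   random_state=0).fit(f)
                for u, f in users_features.items()}

    def identify(models, clip_features):
        """Pick the user whose GMM assigns the clip's frames the highest
        average log-likelihood."""
        return max(models, key=lambda u: models[u].score(clip_features))
    ```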
arXiv:2209.04511 (cs.SE), https://arxiv.org/abs/2209.04511
Title: Pitfalls and Guidelines for Using Time-Based Git Data
Authors: Samuel W. Flint, Jigyasa Chauhan, Robert Dyer
Abstract: Many software engineering research papers rely on time-based data (e.g., commit timestamps, issue report creation/update/close dates, release dates). Like most real-world data, however, time-based data is often dirty. To date, there are no studies that quantify how frequently such data is used by the software engineering research community, or that investigate sources of dirty data and quantify how often such data is dirty. Depending on the research task and method used, including such dirty data could affect the research results. This paper presents an extended survey of papers that utilize time-based data, published in the Mining Software Repositories (MSR) conference series. Out of the 754 technical track and data papers published in MSR 2004-2021, at least 290 (38%) utilized time-based data. We also observed that most time-based data used in research papers comes in the form of Git commits, often from GitHub. Based on those results, we then used the Boa and Software Heritage infrastructures to help identify and quantify several sources of dirty Git timestamp data. Finally, we provide guidelines and best practices for researchers utilizing time-based data from Git repositories.
Submitted 9 September, 2022; originally announced September 2022.
Comments: Accepted for publication in Empirical Software Engineering (EMSE). arXiv admin note: substantial text overlap with arXiv:2103.11339
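In the spirit of the guidelines above, here is a hedged sketch of the kind of sanity check a researcher might run before trusting commit timestamps; the thresholds are arbitrary illustrative choices, and it assumes git is on PATH and is run inside a repository.

```python
# Flag commits dated in the future, or where author and committer dates
# disagree wildly: both are common sources of "dirty" time-based Git data.
import subprocess, datetime

log = subprocess.run(
    ["git", "log", "--format=%H %at %ct"],  # hash, author time, commit time
    capture_output=True, text=True, check=True,
).stdout

now = datetime.datetime.now(datetime.timezone.utc).timestamp()
for line in log.splitlines():
    sha, at, ct = line.split()
    at, ct = int(at), int(ct)
    if ct > now:
        print(f"{sha[:10]}: commit date in the future")
    if abs(ct - at) > 365 * 24 * 3600:
        print(f"{sha[:10]}: author/committer dates differ by > 1 year")
```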
arXiv:2209.01817 (cs.SE), https://arxiv.org/abs/2209.01817, DOI: 10.1145/3540250.3549158
Title: An Exploratory Study on the Predominant Programming Paradigms in Python Code
Authors: Robert Dyer, Jigyasa Chauhan
Abstract: Python is a multi-paradigm programming language that fully supports object-oriented (OO) programming. The language allows writing code in a non-procedural imperative manner, using procedures, using classes, or in a functional style. To date, no one has studied what paradigm(s), if any, are predominant in Python code and projects. In this work, we first define a technique to classify Python files into predominant paradigm(s). We then automate our approach and evaluate it against human judgements, showing over 80% agreement. We then analyze over 100k open-source Python projects, automatically classifying each source file and investigating the paradigm distributions. The results indicate Python developers tend to heavily favor OO features. We also observed a positive correlation between the OO and procedural paradigms and the size of a project. And despite few files or projects being predominantly functional, we still found many uses of functional features.
Submitted 5 September, 2022; originally announced September 2022.
Comments: Accepted to the ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering (ESEC/FSE 2022)
ACM Class: D.3.2
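A toy version of such a file-level classification (an invented heuristic for illustration, not the authors' technique) can be written with the standard ast module: count OO, procedural, and functional constructs and report the dominant one.

```python
# Invented heuristic: tally paradigm-indicative AST nodes in a Python file.
import ast, sys

def paradigm_counts(source):
    tree = ast.parse(source)
    oo = proc = func = 0
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef):
            oo += 1                                   # OO: class definitions
        elif isinstance(node, ast.FunctionDef):
            proc += 1                                 # procedural: functions
        elif isinstance(node, (ast.Lambda, ast.ListComp, ast.GeneratorExp)):
            func += 1                                 # functional constructs
    return {"OO": oo, "procedural": proc, "functional": func}

counts = paradigm_counts(open(sys.argv[1]).read())
print(counts, "->", max(counts, key=counts.get))
```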
arXiv:2206.12626 (cs.LG), https://arxiv.org/abs/2206.12626, DOI: 10.1145/3534678.3539394
Title: Multi-Variate Time Series Forecasting on Variable Subsets
Authors: Jatin Chauhan, Aravindan Raghuveer, Rishi Saket, Jay Nandy, Balaraman Ravindran
Abstract: We formulate a new inference task in the domain of multivariate time series forecasting (MTSF), called Variable Subset Forecast (VSF), where only a small subset of the variables is available during inference. Variables are absent during inference because of long-term data loss (e.g., sensor failures) or a high- to low-resource domain shift between training and testing. To the best of our knowledge, the robustness of MTSF models in the presence of such failures has not been studied in the literature. Through extensive evaluation, we first show that the performance of state-of-the-art methods degrades significantly in the VSF setting. We propose a non-parametric, wrapper technique that can be applied on top of any existing forecast model. Through systematic experiments across 4 datasets and 5 forecast models, we show that our technique is able to recover close to 95% of the models' performance even when only 15% of the original variables are present.
Submitted 25 June, 2022; originally announced June 2022.
Journal ref: ACM SIGKDD 2022 Research Track
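A retrieval-flavored sketch of the wrapper idea follows; the specifics (nearest neighbors plus mean imputation) are assumptions for illustration, not the paper's exact method. The point is that the absent variables can be reconstructed from training windows that look similar on the observed ones, after which any off-the-shelf forecaster can be applied.

```python
# Complete a partially observed window from similar training windows.
import numpy as np

rng = np.random.default_rng(0)
train = rng.normal(size=(1000, 10))      # historical windows, 10 variables
observed = [0, 1]                        # only 2 of 10 variables at inference

def impute(partial, k=5):
    d = np.linalg.norm(train[:, observed] - partial[observed], axis=1)
    nbrs = train[np.argsort(d)[:k]]      # k most similar training windows
    full = nbrs.mean(axis=0)             # borrow missing dims from neighbors
    full[observed] = partial[observed]   # keep what we actually observed
    return full

x = np.full(10, np.nan)
x[observed] = [0.3, -1.2]
print(impute(x).round(2))                # completed window for the forecaster
```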
arXiv:2205.00953 (cs.LG), https://arxiv.org/abs/2205.00953
Title: BERTops: Studying BERT Representations under a Topological Lens
Authors: Jatin Chauhan, Manohar Kaul
Abstract: Proposing scoring functions to effectively understand, analyze and learn various properties of the high-dimensional hidden representations of large-scale transformer models like BERT can be a challenging task. In this work, we explore a new direction by studying the topological features of BERT hidden representations using persistent homology (PH). We propose a novel scoring function named the "persistence scoring function" (PSF), which (i) accurately captures the homology of the high-dimensional hidden representations, correlates well with test set accuracy across a wide range of datasets, and outperforms existing scoring metrics; (ii) captures interesting post-fine-tuning "per-class" properties from both qualitative and quantitative viewpoints; (iii) is more stable to perturbations than the baseline functions, which makes it a very robust proxy; and (iv) also serves as a predictor of the attack success rates for a wide category of black-box and white-box adversarial attack methods. Our extensive correlation experiments demonstrate the practical utility of PSF on various NLP tasks relevant to BERT.
Submitted 29 October, 2022; v1 submitted 2 May, 2022; originally announced May 2022.
Journal ref: IJCNN 2022
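For readers unfamiliar with the machinery, this is roughly what computing persistent homology of a batch of hidden vectors looks like using the third-party ripser package (pip install ripser). The summary statistic at the end, total H0 persistence, is a generic stand-in, not the paper's PSF definition.

```python
# Persistence diagrams of hidden representations via Vietoris-Rips filtration.
import numpy as np
from ripser import ripser

rng = np.random.default_rng(0)
hidden = rng.normal(size=(100, 32))          # e.g., [CLS] vectors from BERT

dgms = ripser(hidden, maxdim=1)["dgms"]      # persistence diagrams H0, H1
h0 = dgms[0]
finite = h0[np.isfinite(h0[:, 1])]           # drop the one infinite H0 bar
total_persistence = (finite[:, 1] - finite[:, 0]).sum()
print(f"total H0 persistence: {total_persistence:.3f}")
```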
arXiv:2203.03794 (cs.LG), https://arxiv.org/abs/2203.03794
Title: YONO: Modeling Multiple Heterogeneous Neural Networks on Microcontrollers
Authors: Young D. Kwon, Jagmohan Chauhan, Cecilia Mascolo
Abstract: With the advancement of Deep Neural Networks (DNN) and large amounts of sensor data from Internet of Things (IoT) systems, the research community has worked to reduce the computational and resource demands of DNNs so they can run on low-resourced microcontrollers (MCUs). However, most current work in embedded deep learning focuses on solving a single task efficiently, while the multi-tasking nature and applications of IoT devices demand systems that can handle a diverse range of tasks (activity, voice, and context recognition) with input from a variety of sensors, simultaneously. In this paper, we propose YONO, a product quantization (PQ) based approach that compresses multiple heterogeneous models and enables in-memory model execution and switching for dissimilar multi-task learning on MCUs. We first adopt PQ to learn codebooks that store the weights of different models. We also propose a novel network optimization and heuristics to maximize the compression rate and minimize the accuracy loss. Then, we develop an online component of YONO for efficient model execution and switching between multiple tasks on an MCU at run time without relying on an external storage device. YONO shows remarkable performance, as it can compress multiple heterogeneous models by up to 12.37x with negligible or no loss of accuracy. Besides, YONO's online component enables efficient execution (latency of 16-159 ms per operation) and reduces model loading/switching latency and energy consumption by 93.3-94.5% and 93.9-95.0%, respectively, compared to external storage access. Interestingly, YONO can compress various architectures trained on datasets that were not seen during YONO's offline codebook learning phase, showing the generalizability of our method. To summarize, YONO shows great potential and opens further doors to enable multi-task learning systems on extremely resource-constrained devices.
Submitted 7 March, 2022; originally announced March 2022.
Comments: Accepted for publication at IPSN 2022
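The PQ primitive at the heart of this approach is easy to demonstrate. Below is a toy sketch (layer shape, sub-vector length, and codebook size are invented for illustration): split a weight matrix into sub-vectors, cluster them into a codebook with k-means, and store only the codes plus the codebook.

```python
# Product quantization of one layer's weights: codes + codebook replace W.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
W = rng.normal(size=(256, 64)).astype(np.float32)   # one layer's weights

sub = 8                                             # sub-vector length
blocks = W.reshape(-1, sub)                         # (2048, 8) sub-vectors
km = KMeans(n_clusters=64, n_init=4, random_state=0).fit(blocks)

codes = km.labels_.astype(np.uint8)                 # 1 byte per sub-vector
codebook = km.cluster_centers_.astype(np.float32)
W_hat = codebook[codes].reshape(W.shape)            # decompress at run time

ratio = W.nbytes / (codes.nbytes + codebook.nbytes)
print(f"compression ~{ratio:.1f}x, MSE {np.mean((W - W_hat) ** 2):.4f}")
```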
arXiv:2202.10100 (cs.LG, cs.AR), https://arxiv.org/abs/2202.10100
Title: Enabling On-Device Smartphone GPU based Training: Lessons Learned
Authors: Anish Das, Young D. Kwon, Jagmohan Chauhan, Cecilia Mascolo
Abstract: Deep Learning (DL) has shown impressive performance in many mobile applications. Most existing work has focused on reducing the computational and resource overheads of running Deep Neural Network (DNN) inference on resource-constrained mobile devices. However, the other aspect of DNN operations, i.e., training (forward and backward passes) on smartphone GPUs, has received little attention thus far. To this end, we conduct an initial analysis to examine the feasibility of on-device training on smartphones using mobile GPUs. We first employ the open-source mobile DL framework MNN and its OpenCL backend for running compute kernels on GPUs. Next, we observed that training on CPUs is much faster than on GPUs and identified two possible bottlenecks: (i) computation and (ii) memory. To address the computation bottleneck, we optimize the OpenCL backend's kernels, showing 2x improvements (40-70 GFLOPs) over CPUs (15-30 GFLOPs) on Snapdragon 8 series processors. However, we find that full DNN training is still much slower on GPUs than on CPUs, indicating that the memory bottleneck plays a significant role in the GPU's lower performance: data movement takes almost 91% of training time due to the low bandwidth. Lastly, based on the findings and failures during our investigation, we present limitations and practical guidelines for future directions.
Submitted 21 February, 2022; originally announced February 2022.
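Throughput figures like the GFLOPs numbers above come from measurements of this general shape. Here is a hedged NumPy version of the measurement; the paper's numbers came from OpenCL kernels on device, not from code like this.

```python
# Back-of-envelope throughput check: time a matmul, report achieved GFLOP/s.
import time
import numpy as np

n = 1024
a = np.random.rand(n, n).astype(np.float32)
b = np.random.rand(n, n).astype(np.float32)

t0 = time.perf_counter()
for _ in range(10):
    a @ b
dt = (time.perf_counter() - t0) / 10

flops = 2 * n ** 3                     # multiply-adds in an n^3 matmul
print(f"{flops / dt / 1e9:.1f} GFLOP/s")
```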
arXiv:2202.08981 (cs.SD, cs.LG, eess.AS), https://arxiv.org/abs/2202.08981
Title: A Summary of the ComParE COVID-19 Challenges
Authors: Harry Coppock, Alican Akman, Christian Bergler, Maurice Gerczuk, Chloë Brown, Jagmohan Chauhan, Andreas Grammenos, Apinan Hasthanasombat, Dimitris Spathis, Tong Xia, Pietro Cicuta, Jing Han, Shahin Amiriparian, Alice Baird, Lukas Stappen, Sandra Ottl, Panagiotis Tzirakis, Anton Batliner, Cecilia Mascolo, Björn W. Schuller
Abstract: The COVID-19 pandemic has caused massive humanitarian and economic damage. Teams of scientists from a broad range of disciplines have searched for methods to help governments and communities combat the disease. One avenue explored by the machine learning field is the prospect of a digital mass test which can detect COVID-19 from infected individuals' respiratory sounds. We present a summary of the results from the INTERSPEECH 2021 Computational Paralinguistics Challenges: COVID-19 Cough (CCS) and COVID-19 Speech (CSS).
Submitted 17 February, 2022; originally announced February 2022.
Comments: 18 pages, 13 figures
arXiv:2201.01232 (cs.SD, cs.LG, eess.AS), https://arxiv.org/abs/2201.01232, DOI: 10.2196/37004
Title: Exploring Longitudinal Cough, Breath, and Voice Data for COVID-19 Progression Prediction via Sequential Deep Learning: Model Development and Validation
Authors: Ting Dang, Jing Han, Tong Xia, Dimitris Spathis, Erika Bondareva, Chloë Siegele-Brown, Jagmohan Chauhan, Andreas Grammenos, Apinan Hasthanasombat, Andres Floto, Pietro Cicuta, Cecilia Mascolo
Abstract: Recent work has shown the potential of using audio data (e.g., cough, breathing, and voice) in screening for COVID-19. However, these approaches only focus on one-off detection, classifying infection from the current audio sample, and do not monitor disease progression. Little work has explored continuously monitoring COVID-19 progression, especially recovery, through longitudinal audio data. Tracking disease progression characteristics could lead to more timely treatment. The primary objective of this study is to explore the potential of longitudinal audio samples over time for COVID-19 progression prediction and, especially, recovery trend prediction using sequential deep learning techniques. Crowdsourced respiratory audio data, including breathing, cough, and voice samples, from 212 individuals over 5-385 days were analyzed. We developed a deep learning-enabled tracking tool using gated recurrent units (GRUs) to detect COVID-19 progression by exploring the audio dynamics of the individuals' historical audio biomarkers. The investigation comprised two parts: (1) COVID-19 detection in terms of positive and negative (healthy) tests, and (2) longitudinal disease progression prediction over time in terms of the probability of positive tests. The strong performance for COVID-19 detection, yielding an AUROC of 0.79, a sensitivity of 0.75, and a specificity of 0.71, supported the effectiveness of the approach compared to methods that do not leverage longitudinal dynamics. We further examined the predicted disease progression trajectory, which displayed high consistency with test results, with a correlation of 0.75 in the test cohort and 0.86 in a subset of the test cohort who reported recovery. Our findings suggest that monitoring COVID-19 evolution via longitudinal audio data has potential in tracking individuals' disease progression and recovery.
Submitted 22 June, 2022; v1 submitted 4 January, 2022; originally announced January 2022.
Comments: Updated title. Revised format according to journal requirements
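An assumed-architecture sketch of such a GRU tracker follows (feature size, hidden size, and the single-layer design are placeholders, not the released model): one audio feature vector per day goes in, and one probability of a positive test comes out.

```python
# GRU over a participant's sequence of per-day audio feature vectors.
import torch
import torch.nn as nn

class ProgressionGRU(nn.Module):
    def __init__(self, n_features=128, hidden=64):
        super().__init__()
        self.gru = nn.GRU(n_features, hidden, batch_first=True)
        self.head = nn.Linear(hidden, 1)

    def forward(self, x):                        # x: (batch, days, n_features)
        _, h = self.gru(x)                       # h: (1, batch, hidden)
        return torch.sigmoid(self.head(h[-1]))   # P(positive) per participant

model = ProgressionGRU()
days = torch.randn(4, 14, 128)                   # 4 participants, 14 days each
print(model(days).squeeze(-1))                   # one probability each
```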
arXiv:2110.13290 (cs.LG, cs.AI, cs.HC, cs.PF), https://arxiv.org/abs/2110.13290
Title: Exploring System Performance of Continual Learning for Mobile and Embedded Sensing Applications
Authors: Young D. Kwon, Jagmohan Chauhan, Abhishek Kumar, Pan Hui, Cecilia Mascolo
Abstract: Continual learning approaches help deep neural network models adapt and learn incrementally by trying to solve catastrophic forgetting. However, whether these existing approaches, applied traditionally to image-based tasks, work with the same efficacy on the sequential time series data generated by mobile or embedded sensing systems remains an unanswered question. To address this void, we conduct the first comprehensive empirical study quantifying the performance of three predominant continual learning schemes (i.e., regularization, replay, and replay with exemplars) on six datasets from three mobile and embedded sensing applications, in a range of scenarios with different learning complexities. More specifically, we implement an end-to-end continual learning framework on edge devices, then investigate the generalizability of, and the trade-offs between, performance, storage, computational cost, and memory footprint of the different continual learning methods. Our findings suggest that replay with exemplars-based schemes such as iCaRL have the best performance trade-offs, even in complex scenarios, at the expense of some storage space (a few MB) for training examples (1% to 5%). We also demonstrate for the first time that it is feasible and practical to run continual learning on-device with a limited memory budget. In particular, the latency on two types of mobile and embedded devices suggests that both incremental learning time (a few seconds to 4 minutes) and training time (1 to 75 minutes) across datasets are acceptable, as training could happen on the device while it is charging, thereby ensuring complete data privacy. Finally, we present some guidelines for practitioners who want to apply a continual learning paradigm to mobile sensing tasks.
Submitted 23 June, 2022; v1 submitted 25 October, 2021; originally announced October 2021.
Comments: Accepted for publication at SEC 2021
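The replay family benchmarked in this study revolves around a small memory of past examples mixed into every new task's batches. Here is a minimal sketch of such a buffer; the reservoir-sampling policy is an assumption for the sketch, simpler than iCaRL's herding-based exemplar selection.

```python
# Bounded replay memory for continual learning, filled by reservoir sampling.
import random

class ReplayBuffer:
    def __init__(self, capacity=500):
        self.capacity, self.data, self.seen = capacity, [], 0

    def add(self, example):                  # reservoir sampling keeps a
        self.seen += 1                       # uniform sample of the stream
        if len(self.data) < self.capacity:
            self.data.append(example)
        else:
            j = random.randrange(self.seen)
            if j < self.capacity:
                self.data[j] = example

    def sample(self, k):
        return random.sample(self.data, min(k, len(self.data)))

buf = ReplayBuffer(capacity=3)
for x in range(10):
    buf.add(x)
print(buf.sample(2))  # old examples replayed alongside the new task's data
```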
arXiv:2110.13205 (cs.LG), https://arxiv.org/abs/2110.13205
Title: A Probabilistic Framework for Knowledge Graph Data Augmentation
Authors: Jatin Chauhan, Priyanshu Gupta, Pasquale Minervini
Abstract: We present NNMFAug, a probabilistic framework to perform data augmentation for the task of knowledge graph completion, countering the problem of data scarcity and enhancing the learning process of neural link predictors. Our method can generate potentially diverse triples, with the advantage of being efficient and scalable as well as agnostic to the choice of link prediction model and dataset. Experiments and analysis on popular models and benchmarks show that NNMFAug can bring notable improvements over the baselines.
Submitted 25 October, 2021; originally announced October 2021.
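A factorization-based sketch of the augmentation idea follows; the single relation, sklearn's NMF, and the argmax selection are simplifying assumptions, not the paper's probabilistic procedure. The gist: score unseen (head, tail) pairs from a non-negative factorization of the observed adjacency and propose the best-scoring ones as new triples.

```python
# Propose augmentation triples from an NMF reconstruction of one relation.
import numpy as np
from sklearn.decomposition import NMF

rng = np.random.default_rng(0)
A = (rng.random((20, 20)) < 0.1).astype(float)    # observed (head, tail) pairs

model = NMF(n_components=5, init="random", random_state=0, max_iter=500)
scores = model.fit_transform(A) @ model.components_   # reconstructed scores

scores[A > 0] = -np.inf                           # ignore already-known pairs
h, t = np.unravel_index(np.argmax(scores), scores.shape)
print(f"candidate augmentation triple: ({h}, relation_0, {t})")
```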
arXiv:2107.02114 (cs.CV), https://arxiv.org/abs/2107.02114
Title: Semi-supervised Learning for Dense Object Detection in Retail Scenes
Authors: Jaydeep Chauhan, Srikrishna Varadarajan, Muktabh Mayank Srivastava
Abstract: Retail scenes usually contain a high number of densely packed objects in each image. Standard object detection techniques use a fully supervised training methodology. This is highly costly, as annotating a large dense retail object detection dataset involves an order of magnitude more effort than standard datasets. Hence, we propose semi-supervised learning to effectively use the large amount of unlabeled data available in the retail domain. We adapt a popular self-training method called Noisy Student, initially proposed for object classification, to the task of dense object detection. We show that using unlabeled data with the Noisy Student training methodology, we can improve the state of the art on precise detection of objects in densely packed retail scenes. We also show that the performance of the model increases as the amount of unlabeled data increases.
Submitted 5 July, 2021; originally announced July 2021.
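The teacher/pseudo-label/noised-student loop generalizes beyond detection, so the sketch below shows it on a toy classifier as a stand-in; the paper applies the same recipe to a dense object detector, which would not fit in a few lines.

```python
# Noisy-student-style self-training: teacher labels unlabeled data, a noised
# student is trained on labeled + pseudo-labeled data.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=600, random_state=0)
X_lab, y_lab, X_unlab = X[:100], y[:100], X[100:]

teacher = RandomForestClassifier(random_state=0).fit(X_lab, y_lab)
pseudo = teacher.predict(X_unlab)                 # pseudo-labels

# "Noise" the student's inputs (input perturbation stands in for the paper's
# stronger augmentations).
X_noisy = X_unlab + np.random.default_rng(0).normal(scale=0.1, size=X_unlab.shape)
student = RandomForestClassifier(random_state=1).fit(
    np.vstack([X_lab, X_noisy]), np.concatenate([y_lab, pseudo])
)
print(f"student accuracy on all 600 points (toy check): {student.score(X, y):.2f}")
```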
arXiv:2106.15523 (cs.SD, cs.LG, eess.AS), https://arxiv.org/abs/2106.15523
Title: Sounds of COVID-19: exploring realistic performance of audio-based digital testing
Authors: Jing Han, Tong Xia, Dimitris Spathis, Erika Bondareva, Chloë Brown, Jagmohan Chauhan, Ting Dang, Andreas Grammenos, Apinan Hasthanasombat, Andres Floto, Pietro Cicuta, Cecilia Mascolo
Abstract: Researchers have been battling with the question of how to identify Coronavirus disease (COVID-19) cases efficiently, affordably and at scale. Recent work has shown how audio-based approaches, which collect respiratory audio data (cough, breathing and voice), can be used for testing; however, there is a lack of exploration of how biases and methodological decisions impact these tools' performance in practice. In this paper, we explore the realistic performance of audio-based digital testing of COVID-19. To investigate this, we collected a large crowdsourced respiratory audio dataset through a mobile app, alongside recent COVID-19 test results and symptoms intended as ground truth. Within the collected dataset, we selected 5,240 samples from 2,478 participants and split them into participant-independent sets for model development and validation. Among these, we controlled for potential confounding factors (such as demographics and language). The unbiased model takes features extracted from breathing, cough, and voice signals as predictors and yields an AUC-ROC of 0.71 (95% CI: 0.65-0.77). We further explore different unbalanced distributions to show how biases and participant splits affect performance. Finally, we discuss how the realistic model presented could be integrated into clinical practice to realize continuous, ubiquitous, sustainable and affordable testing at population scale.
Submitted 29 June, 2021; originally announced June 2021.
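The participant-independent splitting emphasized above is easy to get wrong. A short sketch with scikit-learn's GroupShuffleSplit (synthetic features and IDs) shows how to guarantee that no participant contributes samples to both sides of the split, so a model cannot latch onto speaker identity instead of COVID status.

```python
# Participant-independent train/test split keyed on participant IDs.
import numpy as np
from sklearn.model_selection import GroupShuffleSplit

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 40))             # audio features (synthetic here)
participants = rng.integers(0, 60, 300)    # ~5 samples per participant

splitter = GroupShuffleSplit(n_splits=1, test_size=0.3, random_state=0)
train_idx, test_idx = next(splitter.split(X, groups=participants))

overlap = set(participants[train_idx]) & set(participants[test_idx])
print(f"participants in both splits: {len(overlap)}")  # -> 0
```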
arXiv:2106.07268 (cs.SD, cs.LG, eess.AS), https://arxiv.org/abs/2106.07268
Title: FastICARL: Fast Incremental Classifier and Representation Learning with Efficient Budget Allocation in Audio Sensing Applications
Authors: Young D. Kwon, Jagmohan Chauhan, Cecilia Mascolo
Abstract: Various incremental learning (IL) approaches have been proposed to help deep learning models learn new tasks and classes continuously without forgetting what was learned previously (i.e., to avoid catastrophic forgetting). With the growing number of deployed audio sensing applications that need to dynamically incorporate new tasks and changing input distributions from users, the ability to perform IL on-device becomes essential for both efficiency and user privacy. However, prior works suffer from high computational costs and storage demands, which hinder the deployment of IL on-device. In this work, to overcome these limitations, we develop FastICARL, an end-to-end, on-device IL framework that incorporates exemplar-based IL and quantization in the context of audio-based applications. We first employ a k-nearest-neighbor approach to reduce the latency of IL, then jointly utilize a quantization technique to decrease the storage requirements. We implement FastICARL on two types of mobile devices and demonstrate that it remarkably decreases the IL time by up to 78-92% and the storage requirements by 2-4 times without sacrificing performance. FastICARL enables complete on-device IL, ensuring user privacy as the user data does not need to leave the device.
Submitted 24 June, 2021; v1 submitted 14 June, 2021; originally announced June 2021.
Comments: Accepted for publication at INTERSPEECH 2021
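A hedged sketch of the two ingredients named above, exemplar selection and quantized storage: nearest-to-the-class-mean selection here is a cheap stand-in for the paper's kNN-based budget allocation, and symmetric int8 quantization is one common choice, not necessarily the paper's.

```python
# Pick exemplars near the class mean, then store them quantized to int8.
import numpy as np

rng = np.random.default_rng(0)
features = rng.normal(size=(500, 64)).astype(np.float32)  # one class's data

k = 20
mean = features.mean(axis=0)
d = np.linalg.norm(features - mean, axis=1)
exemplars = features[np.argsort(d)[:k]]        # k samples nearest the mean

scale = np.abs(exemplars).max() / 127.0        # symmetric int8 quantization
q = np.round(exemplars / scale).astype(np.int8)
print(f"memory: {exemplars.nbytes} B float32 -> {q.nbytes} B int8 (~4x less)")
```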
arXiv:2106.07047 (cs.LG, cs.CR), https://arxiv.org/abs/2106.07047
Title: Target Model Agnostic Adversarial Attacks with Query Budgets on Language Understanding Models
Authors: Jatin Chauhan, Karan Bhukar, Manohar Kaul
Abstract: Despite significant improvements in natural language understanding models with the advent of models like BERT and XLNet, these neural-network-based classifiers are vulnerable to black-box adversarial attacks, where the attacker is only allowed to query the target model's outputs. We add two more realistic restrictions on attack methods, namely limiting the number of queries allowed (query budget) and crafting attacks that easily transfer across different pre-trained models (transferability), which render previous attack models impractical and ineffective. Here, we propose a target-model-agnostic adversarial attack method with a high degree of attack transferability across the attacked models. Our empirical studies show that, in comparison to baseline methods, our method generates highly transferable adversarial sentences under the restriction of limited query budgets.
Submitted 13 June, 2021; originally announced June 2021.
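To make the query-budget constraint concrete, here is a schematic greedy word-substitution attack with an explicit query counter. This is a generic illustration of the setting, not the paper's method; the toy target model and substitute table are invented.

```python
# Greedy black-box word substitution under a hard query budget.
def attack(sentence, predict, substitutes, budget=50):
    words = sentence.split()
    orig = predict(" ".join(words)); queries = 1
    for i, w in enumerate(words):
        for cand in substitutes.get(w, []):
            if queries >= budget:
                return " ".join(words), queries     # budget exhausted
            trial = words[:i] + [cand] + words[i + 1:]
            queries += 1
            if predict(" ".join(trial)) != orig:    # label flipped: success
                return " ".join(trial), queries
    return " ".join(words), queries

toy_model = lambda s: int("good" in s)              # stand-in target model
adv, used = attack("a good movie", toy_model, {"good": ["decent", "fine"]})
print(adv, f"({used} queries)")                     # a decent movie (2 queries)
```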
arXiv:2105.12399 [pdf, other] - cs.CL (Computation and Language)
Title: SentEmojiBot: Empathising Conversations Generation with Emojis
Authors: Akhilesh Ravi, Amit Yadav, Jainish Chauhan, Jatin Dholakia, Naman Jain, Mayank Singh
Abstract: The increasing use of dialogue agents makes it extremely desirable for them to understand and acknowledge implied emotions so they can respond like humans with empathy. Chatbots using traditional techniques analyze emotions based on the context and meaning of the text and lack an understanding of emotions expressed through the face. Emojis representing facial expressions present a promising way to express emotions. However, none of the AI systems utilizes emojis for empathetic conversation generation. We propose SentEmojiBot, based on the SentEmoji dataset, to generate empathetic conversations with a combination of emojis and text. Evaluation metrics show that the BERT-based model outperforms the vanilla transformer model. A user study indicates that the dialogues generated by our model were understandable and that adding emojis improved empathetic traits in conversations by 9.8%.
Submitted 26 May, 2021; originally announced May 2021.
arXiv:2103.11339 [pdf, other] - cs.SE (Software Engineering)
Title: Escaping the Time Pit: Pitfalls and Guidelines for Using Time-Based Git Data
Authors: Samuel W. Flint, Jigyasa Chauhan, Robert Dyer
Abstract: Many software engineering research papers rely on time-based data (e.g., commit timestamps, issue report creation/update/close dates, release dates). Like most real-world data, however, time-based data is often dirty. To date, there are no studies that quantify how frequently such data is used by the software engineering research community, or that investigate the sources of such dirty data and quantify how often it occurs. Depending on the research task and method used, including such dirty data could affect the research results. This paper presents the first survey of papers that utilize time-based data, published in the Mining Software Repositories (MSR) conference series. Out of the 690 technical track and data papers published in MSR 2004-2020, we saw that at least 35% of papers utilized time-based data. We then used the Boa and Software Heritage infrastructures to help identify and quantify several sources of dirty commit timestamp data. Finally, we provide guidelines/best practices for researchers utilizing time-based data from Git repositories.
Submitted 21 March, 2021; originally announced March 2021.
Comments: Accepted to the 18th International Conference on Mining Software Repositories (MSR 2021)
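Illustrative note: one practical guideline in this space is to sanity-check commit timestamps before analysis. A small illustrative check (not taken from the paper) that flags commits with future dates or with a committer date earlier than the author date, two common kinds of dirty time-based Git data, using only git's own log format codes:

    import subprocess, time

    def suspicious_commits(repo_path):
        # %H = hash, %at = author timestamp, %ct = committer timestamp
        out = subprocess.run(
            ["git", "-C", repo_path, "log", "--format=%H %at %ct"],
            capture_output=True, text=True, check=True).stdout
        now = time.time()
        flagged = []
        for line in out.splitlines():
            sha, author_ts, commit_ts = line.split()
            a, c = int(author_ts), int(commit_ts)
            # future dates, or committer date preceding author date, are suspect
            if a > now or c > now or c < a:
                flagged.append(sha)
        return flagged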
arXiv:2102.13468 [pdf, other] - eess.AS (Audio and Speech Processing); cs.CL (Computation and Language); cs.LG (Machine Learning); cs.SD (Sound)
Title: The INTERSPEECH 2021 Computational Paralinguistics Challenge: COVID-19 Cough, COVID-19 Speech, Escalation & Primates
Authors: Björn W. Schuller, Anton Batliner, Christian Bergler, Cecilia Mascolo, Jing Han, Iulia Lefter, Heysem Kaya, Shahin Amiriparian, Alice Baird, Lukas Stappen, Sandra Ottl, Maurice Gerczuk, Panagiotis Tzirakis, Chloë Brown, Jagmohan Chauhan, Andreas Grammenos, Apinan Hasthanasombat, Dimitris Spathis, Tong Xia, Pietro Cicuta, Leon J. M. Rothkrantz, Joeri Zwerts, Jelle Treep, Casper Kaandorp
Abstract: The INTERSPEECH 2021 Computational Paralinguistics Challenge addresses four different problems for the first time in a research competition under well-defined conditions: in the COVID-19 Cough and COVID-19 Speech Sub-Challenges, a binary classification on COVID-19 infection has to be made based on coughing sounds and speech; in the Escalation Sub-Challenge, a three-way assessment of the level of escalation in a dialogue is featured; and in the Primates Sub-Challenge, four species vs. background need to be classified. We describe the Sub-Challenges, baseline feature extraction, and classifiers based on the 'usual' COMPARE and BoAW features, as well as deep unsupervised representation learning using the AuDeep toolkit and deep feature extraction from pre-trained CNNs using the Deep Spectrum toolkit; in addition, we add deep end-to-end sequential modelling and a partially linguistic analysis.
Submitted 24 February, 2021; originally announced February 2021.
Comments: 5 pages
MSC Class: 68; ACM Class: I.2.7; I.5.0; J.3
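Illustrative note: challenge baselines of this kind follow a common recipe: one fixed acoustic feature vector per clip fed to a linear classifier, scored by unweighted average recall (UAR). A generic sketch of that recipe with scikit-learn; the random feature matrix stands in for COMPARE or BoAW features, which in practice would come from a toolkit such as openSMILE, and the 6373-dim size is only the commonly cited COMPARE set size.

    import numpy as np
    from sklearn.svm import LinearSVC
    from sklearn.preprocessing import StandardScaler
    from sklearn.pipeline import make_pipeline
    from sklearn.metrics import recall_score

    X = np.random.randn(100, 6373)                 # placeholder per-clip features
    y = np.random.randint(0, 2, size=100)          # e.g., COVID-positive vs negative

    clf = make_pipeline(StandardScaler(), LinearSVC(C=1e-4, dual=False))
    clf.fit(X[:80], y[:80])
    # UAR is macro-averaged recall over the classes
    uar = recall_score(y[80:], clf.predict(X[80:]), average="macro")
    print(f"UAR: {uar:.2f}")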
arXiv:2102.05956 [pdf, other] - cs.LG (Machine Learning); cs.AI (Artificial Intelligence); cs.NI (Networking and Internet Architecture)
Title: The Benefit of the Doubt: Uncertainty Aware Sensing for Edge Computing Platforms
Authors: Lorena Qendro, Jagmohan Chauhan, Alberto Gil C. P. Ramos, Cecilia Mascolo
Abstract: Neural networks (NNs) lack measures of "reliability" that would enable reasoning over their predictions. Despite their vital importance, especially in areas of human well-being and health, state-of-the-art uncertainty estimation techniques are computationally expensive when applied to resource-constrained devices. We propose an efficient framework for predictive uncertainty estimation in NNs deployed on embedded edge systems with no need for fine-tuning or re-training strategies. To meet the energy and latency requirements of these embedded platforms, the framework is built from the ground up to provide predictive uncertainty based on only one forward pass and a negligible amount of additional matrix multiplications, with theoretically proven correctness. Our aim is to enable already-trained deep learning models to generate uncertainty estimates on resource-limited devices at inference time, focusing on classification tasks. The framework is founded on theoretical developments casting dropout training as approximate inference in Bayesian NNs. Our layerwise distribution approximation to the convolution layer cascades through the network, providing uncertainty estimates in a single run, which ensures minimal overhead, especially compared with uncertainty techniques that require multiple forward passes and a corresponding linear rise in energy and latency, making them unsuitable in practice. We demonstrate that our approach yields better performance and flexibility than previous work based on multilayer perceptrons. Our evaluation on mobile-application datasets shows that our approach not only obtains robust and accurate uncertainty estimates but also outperforms state-of-the-art methods in terms of systems performance, reducing energy consumption (up to 28x) and keeping memory overhead at a minimum while still improving accuracy (up to 16%).
Submitted 11 February, 2021; originally announced February 2021.
Comments: 13 pages, 6 figures
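Illustrative note: the one-pass idea is to propagate a mean and a variance through each layer analytically instead of sampling dropout masks. A toy sketch of such moment propagation through one dropout-plus-linear layer; the assumptions (inverted Bernoulli dropout, independent activations, a fully connected layer rather than the paper's convolutional derivation) are mine, made to keep the illustration short.

    import numpy as np

    def propagate_dropout_linear(m, v, W, p):
        # push (mean m, variance v) through inverted dropout(p), then y = W x.
        # For d ~ Bernoulli(1-p)/(1-p): E[d] = 1 and E[d^2] = 1/(1-p), so
        # Var(d*x) = v/(1-p) + m^2 * p/(1-p) under independence.
        v_drop = v / (1 - p) + (m ** 2) * p / (1 - p)
        m_out = W @ m                    # mean unchanged by inverted dropout
        v_out = (W ** 2) @ v_drop        # variances add under independence
        return m_out, v_out

    # a single forward pass yields both a prediction and its variance
    rng = np.random.default_rng(0)
    W = rng.standard_normal((10, 64))
    m, v = rng.standard_normal(64), np.full(64, 0.1)
    mean, var = propagate_dropout_linear(m, v, W, p=0.2)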
<span class="abstract-short has-text-grey-dark mathjax" id="2102.05225v1-abstract-short" style="display: inline;"> The development of fast and accurate screening tools, which could facilitate testing and prevent more costly clinical tests, is key to the current pandemic of COVID-19. In this context, some initial work shows promise in detecting diagnostic signals of COVID-19 from audio sounds. In this paper, we propose a voice-based framework to automatically detect individuals who have tested positive for COVI&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05225v1-abstract-full').style.display = 'inline'; document.getElementById('2102.05225v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.05225v1-abstract-full" style="display: none;"> The development of fast and accurate screening tools, which could facilitate testing and prevent more costly clinical tests, is key to the current pandemic of COVID-19. In this context, some initial work shows promise in detecting diagnostic signals of COVID-19 from audio sounds. In this paper, we propose a voice-based framework to automatically detect individuals who have tested positive for COVID-19. We evaluate the performance of the proposed framework on a subset of data crowdsourced from our app, containing 828 samples from 343 participants. By combining voice signals and reported symptoms, an AUC of $0.79$ has been attained, with a sensitivity of $0.68$ and a specificity of $0.82$. We hope that this study opens the door to rapid, low-cost, and convenient pre-screening tools to automatically detect the disease. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05225v1-abstract-full').style.display = 'none'; document.getElementById('2102.05225v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. 
arXiv:2011.06149 [pdf, ps, other] - cs.CL (Computation and Language)
Title: Identifying Depressive Symptoms from Tweets: Figurative Language Enabled Multitask Learning Framework
Authors: Shweta Yadav, Jainish Chauhan, Joy Prakash Sain, Krishnaprasad Thirunarayan, Amit Sheth, Jeremiah Schumm
Abstract: Existing studies on using social media to derive users' mental health status focus on the depression detection task. However, for case management and referral to psychiatrists, healthcare workers require a practical and scalable depressive disorder screening and triage system. This study aims to design and evaluate a decision support system (DSS) that reliably determines the depressive triage level by capturing fine-grained depressive symptoms expressed in user tweets through emulation of the Patient Health Questionnaire-9 (PHQ-9) that is routinely used in clinical practice. Reliable detection of depressive symptoms from tweets is challenging because the 280-character limit on tweets incentivizes the use of creative artifacts in the utterances, and figurative usage contributes to effective expression. We propose a novel BERT-based robust multi-task learning framework to accurately identify depressive symptoms using the auxiliary task of figurative usage detection. Specifically, our proposed novel task-sharing mechanism, co-task aware attention, enables automatic selection of optimal information across the BERT layers and tasks by soft-sharing of parameters. Our results show that modeling figurative usage can demonstrably improve the model's robustness and reliability in distinguishing depression symptoms.
Submitted 11 November, 2020; originally announced November 2020.
Comments: Accepted for publication in COLING 2020
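Illustrative note: soft selection of information across BERT layers can be illustrated with a per-task softmax over the stacked hidden states. This sketch shows only that generic mechanism; it is not the paper's co-task aware attention, and all dimensions are assumed.

    import torch
    import torch.nn as nn

    class PerTaskLayerMix(nn.Module):
        # each task learns softmax weights over the encoder's L layers,
        # then classifies from its own weighted mixture of hidden states
        def __init__(self, n_layers, hidden, n_tasks, n_classes):
            super().__init__()
            self.layer_logits = nn.Parameter(torch.zeros(n_tasks, n_layers))
            self.heads = nn.ModuleList(
                nn.Linear(hidden, n_classes) for _ in range(n_tasks))

        def forward(self, layer_states):
            # layer_states: (L, batch, hidden), e.g. BERT hidden_states stacked
            w = torch.softmax(self.layer_logits, dim=-1)       # (tasks, L)
            mixed = torch.einsum("tl,lbh->tbh", w, layer_states)
            return [head(mixed[t]) for t, head in enumerate(self.heads)]

    # usage with dummy states: 13 layers, batch 4, hidden 768, 2 tasks
    states = torch.randn(13, 4, 768)
    model = PerTaskLayerMix(13, 768, n_tasks=2, n_classes=5)
    logits_main, logits_aux = model(states)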
arXiv:2008.05370 [pdf, other] - cs.HC (Human-Computer Interaction); eess.SP (Signal Processing)
Title: A First Step Towards On-Device Monitoring of Body Sounds in the Wild
Authors: Shyam A. Tailor, Jagmohan Chauhan, Cecilia Mascolo
Abstract: Body sounds provide rich information about the state of the human body and can be useful in many medical applications. Auscultation, the practice of listening to body sounds, has been used for centuries in respiratory and cardiac medicine to diagnose or track disease progression. To date, however, its use has been confined to clinical and highly controlled settings. Our work addresses this limitation: we devise a chest-mounted wearable for continuous monitoring of body sounds that leverages data processing algorithms running on-device. We concentrate on the detection of heart sounds to perform heart rate monitoring. To improve robustness to ambient noise and motion artefacts, our device uses an algorithm that explicitly segments the collected audio into the phases of the cardiac cycle. Our pilot study with 9 users demonstrates that it is possible to obtain heart rate estimates that are competitive with commercial heart rate monitors, with low enough power consumption for continuous use.
Submitted 12 August, 2020; originally announced August 2020.
Comments: 4 page version to appear at the WellComp Workshop at Ubicomp 2020
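Illustrative note: once heart sounds are located, heart rate follows from the spacing of detected beats. A simplified sketch using an amplitude envelope and peak picking with scipy; a real device would need the noise-robust cardiac-phase segmentation the paper describes, and the thresholds here are arbitrary assumptions.

    import numpy as np
    from scipy.signal import find_peaks

    def heart_rate_bpm(audio, fs):
        # locate S1-like energy peaks and average the inter-beat intervals
        envelope = np.abs(audio)
        win = int(0.05 * fs)                       # 50 ms moving average
        envelope = np.convolve(envelope, np.ones(win) / win, mode="same")
        # peaks at least 0.4 s apart (<= 150 bpm) and reasonably prominent
        peaks, _ = find_peaks(envelope, distance=int(0.4 * fs),
                              prominence=envelope.std())
        if len(peaks) < 2:
            return None
        ibi = np.diff(peaks) / fs                  # inter-beat intervals, seconds
        return 60.0 / ibi.mean()

    # usage: synthetic 72 bpm click train sampled at 4 kHz
    fs = 4000
    t = np.zeros(fs * 10)
    t[::int(fs * 60 / 72)] = 1.0
    print(heart_rate_bpm(t, fs))                   # ~72.0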
arXiv:2006.05919 [pdf, other] - cs.SD (Sound); cs.LG (Machine Learning); eess.AS (Audio and Speech Processing)
DOI: 10.1145/3394486.3412865
Title: Exploring Automatic Diagnosis of COVID-19 from Crowdsourced Respiratory Sound Data
Authors: Chloë Brown, Jagmohan Chauhan, Andreas Grammenos, Jing Han, Apinan Hasthanasombat, Dimitris Spathis, Tong Xia, Pietro Cicuta, Cecilia Mascolo
Abstract: Audio signals generated by the human body (e.g., sighs, breathing, heart, digestion, vibration sounds) have routinely been used by clinicians as indicators to diagnose disease or assess disease progression. Until recently, such signals were usually collected through manual auscultation at scheduled visits. Research has now started to use digital technology to gather bodily sounds (e.g., from digital stethoscopes) for cardiovascular or respiratory examination, which could then be used for automatic analysis. Some initial work shows promise in detecting diagnostic signals of COVID-19 from voice and coughs. In this paper we describe our data analysis over a large-scale crowdsourced dataset of respiratory sounds collected to aid diagnosis of COVID-19. We use coughs and breathing to understand how discernible COVID-19 sounds are from those in asthma or healthy controls. Our results show that even a simple binary machine learning classifier is able to correctly classify healthy and COVID-19 sounds. We also show how we distinguish a user who tested positive for COVID-19 and has a cough from a healthy user with a cough, and users who tested positive for COVID-19 and have a cough from users with asthma and a cough. Our models achieve an AUC above 80% across all tasks. These results are preliminary and only scratch the surface of the potential of this type of data and of audio-based machine learning. This work opens the door to further investigation of how automatically analysed respiratory patterns could be used as pre-screening signals to aid COVID-19 diagnosis.
Submitted 18 January, 2021; v1 submitted 10 June, 2020; originally announced June 2020.
Comments: 9 pages, 6 figures, 2 tables, Accepted for publication at KDD'20 (Health Day)
arXiv:2005.09752 [pdf, other] - cs.LG (Machine Learning); cs.SI (Social and Information Networks); stat.ML (Machine Learning)
Title: Learning Representations using Spectral-Biased Random Walks on Graphs
Authors: Charu Sharma, Jatin Chauhan, Manohar Kaul
Abstract: Several state-of-the-art neural graph embedding methods are based on short random walks (stochastic processes) because of their ease of computation, simplicity in capturing complex local graph properties, scalability, and interpretability. In this work, we are interested in studying how much a probabilistic bias in this stochastic process affects the quality of the nodes picked by the process. In particular, our biased walk, with a certain probability, favors movement towards nodes whose neighborhoods bear a structural resemblance to the current node's neighborhood. We succinctly capture this neighborhood as a probability measure based on the spectrum of the node's neighborhood subgraph, represented as a normalized Laplacian matrix. We propose the use of a paragraph vector model with a novel Wasserstein regularization term. We empirically evaluate our approach against several state-of-the-art node embedding techniques on a wide variety of real-world datasets and demonstrate that our proposed method significantly improves upon existing methods on both link prediction and node classification tasks.
Submitted 29 July, 2020; v1 submitted 19 May, 2020; originally announced May 2020.
Comments: Accepted at IJCNN 2020: International Joint Conference on Neural Networks
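Illustrative note: the spectral bias can be made concrete in two steps: compute each node's neighborhood-subgraph Laplacian spectrum, then weight walk transitions by spectral similarity. A sketch with networkx and scipy; the exponential similarity kernel and the mixing weight beta are illustrative choices, not the paper's exact formulation.

    import numpy as np
    import networkx as nx
    from scipy.stats import wasserstein_distance

    def neighborhood_spectrum(G, node):
        # eigenvalues of the normalized Laplacian of the subgraph
        # induced by the node and its neighbors
        sub = G.subgraph([node] + list(G.neighbors(node)))
        return nx.normalized_laplacian_spectrum(sub)

    def biased_next_step(G, current, spectra, beta=0.8, rng=np.random):
        # pick the next walk node, favoring spectrally similar neighbors
        nbrs = list(G.neighbors(current))
        dists = np.array([wasserstein_distance(spectra[current], spectra[n])
                          for n in nbrs])
        sim = np.exp(-dists)                       # closer spectra, higher weight
        probs = beta * sim / sim.sum() + (1 - beta) / len(nbrs)
        return rng.choice(nbrs, p=probs / probs.sum())

    G = nx.karate_club_graph()
    spectra = {v: neighborhood_spectrum(G, v) for v in G}
    walk = [0]
    for _ in range(10):
        walk.append(biased_next_step(G, walk[-1], spectra))
    print(walk)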
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.09752v2-abstract-full').style.display = 'none'; document.getElementById('2005.09752v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at IJCNN 2020: International Joint Conference on Neural Networks</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.12815">arXiv:2002.12815</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2002.12815">pdf</a>, <a href="https://arxiv.org/format/2002.12815">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Few-Shot Learning on Graphs via Super-Classes based on Graph Spectral Measures </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chauhan%2C+J">Jatin Chauhan</a>, <a href="/search/cs?searchtype=author&amp;query=Nathani%2C+D">Deepak Nathani</a>, <a href="/search/cs?searchtype=author&amp;query=Kaul%2C+M">Manohar Kaul</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.12815v1-abstract-short" style="display: inline;"> We propose to study the problem of few shot graph classification in graph neural networks (GNNs) to recognize unseen classes, given limited labeled graph examples. Despite several interesting GNN variants being proposed recently for node and graph classification tasks, when faced with scarce labeled examples in the few shot setting, these GNNs exhibit significant loss in classification performance&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.12815v1-abstract-full').style.display = 'inline'; document.getElementById('2002.12815v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.12815v1-abstract-full" style="display: none;"> We propose to study the problem of few shot graph classification in graph neural networks (GNNs) to recognize unseen classes, given limited labeled graph examples. Despite several interesting GNN variants being proposed recently for node and graph classification tasks, when faced with scarce labeled examples in the few shot setting, these GNNs exhibit significant loss in classification performance. Here, we present an approach where a probability measure is assigned to each graph based on the spectrum of the graphs normalized Laplacian. This enables us to accordingly cluster the graph base labels associated with each graph into super classes, where the Lp Wasserstein distance serves as our underlying distance metric. 
arXiv:1906.01195 [pdf, other] - cs.LG (Machine Learning); cs.CL (Computation and Language); stat.ML (Machine Learning)
Title: Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs
Authors: Deepak Nathani, Jatin Chauhan, Charu Sharma, Manohar Kaul
Abstract: The recent proliferation of knowledge graphs (KGs), coupled with incomplete or partial information in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention-based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multi-hop relations in our model. Our empirical study offers insights into the efficacy of our attention-based model, and we show marked performance gains in comparison to state-of-the-art methods on all datasets.
Submitted 4 June, 2019; originally announced June 2019.
Comments: accepted as long paper in ACL 2019
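Illustrative note: attention over a triple's neighborhood can be sketched as scoring each neighboring (relation, entity) pair against the central entity and taking a softmax-weighted sum. A minimal GAT-style illustration in PyTorch; the dimensions and the scoring function are assumptions, not the paper's exact architecture.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class NeighborhoodAttention(nn.Module):
        # aggregate a head entity's neighbors, where each neighbor is the
        # concatenation [h; r; t] of entity and relation embeddings
        def __init__(self, dim):
            super().__init__()
            self.W = nn.Linear(3 * dim, dim, bias=False)   # triple projection
            self.a = nn.Linear(dim, 1, bias=False)         # attention scorer

        def forward(self, h, rels, tails):
            # h: (dim,), rels/tails: (n_neighbors, dim)
            triples = torch.cat(
                [h.expand_as(tails), rels, tails], dim=-1)  # (n, 3*dim)
            c = self.W(triples)                             # (n, dim)
            alpha = F.softmax(F.leaky_relu(self.a(c)), dim=0)
            return (alpha * c).sum(dim=0)                   # new embedding for h

    dim = 16
    att = NeighborhoodAttention(dim)
    h_new = att(torch.randn(dim), torch.randn(5, dim), torch.randn(5, dim))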
arXiv:1711.02217 [pdf, other] - cs.CV (Computer Vision and Pattern Recognition)
Title: Image Segmentation of Multi-Shaped Overlapping Objects
Authors: Kumar Abhinav, Jaideep Singh Chauhan, Debasis Sarkar
Abstract: In this work, we propose a new segmentation algorithm for images containing convex objects present in multiple shapes with a high degree of overlap. The proposed algorithm is carried out in two steps: first we identify the visible contours, segment them using concave points, and group the segments belonging to the same object; the next step is to assign a shape identity to these grouped contour segments. For images containing objects in multiple shapes, we begin by identifying the shape classes of the contours and then assign a shape entity to these classes. We provide comprehensive experimentation of our algorithm on two crystal image datasets. One dataset comprises images containing objects in multiple shapes overlapping each other, and the other contains standard images with objects present in a single shape. We test our algorithm against two baselines, with our proposed algorithm outperforming both.
Submitted 6 November, 2017; originally announced November 2017.
Comments: Accepted at VISAPP 2018
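Illustrative note: concave points, where one object's outline dips inward because another object overlaps it, are natural split candidates. A short sketch that finds them via convexity defects in OpenCV; the depth threshold is an arbitrary assumption, and this stands in for, rather than reproduces, the paper's method.

    import cv2
    import numpy as np

    def concave_points(binary_mask, min_depth=5.0):
        # candidate split points: places where a contour deviates inward
        # from its convex hull
        contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        points = []
        for cnt in contours:
            if len(cnt) < 4:
                continue
            hull = cv2.convexHull(cnt, returnPoints=False)
            defects = cv2.convexityDefects(cnt, hull)
            if defects is None:
                continue
            for start, end, far, depth in defects[:, 0]:
                if depth / 256.0 > min_depth:      # depth is fixed-point (1/256 px)
                    points.append(tuple(cnt[far][0]))
        return points

    # usage: two overlapping discs produce concave points at the "waist"
    mask = np.zeros((100, 100), np.uint8)
    cv2.circle(mask, (35, 50), 20, 255, -1)
    cv2.circle(mask, (65, 50), 20, 255, -1)
    print(concave_points(mask))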
arXiv:1709.07626 [pdf, other] - cs.CR (Cryptography and Security); cs.LG (Machine Learning); cs.NE (Neural and Evolutionary Computing)
Title: BreathRNNet: Breathing Based Authentication on Resource-Constrained IoT Devices using RNNs
Authors: Jagmohan Chauhan, Suranga Seneviratne, Yining Hu, Archan Misra, Aruna Seneviratne, Youngki Lee
Abstract: Recurrent neural networks (RNNs) have shown promising results in audio and speech processing applications due to their strong capabilities in modelling sequential data. In many applications, RNNs tend to outperform conventional models based on GMM/UBMs and i-vectors. The increasing popularity of IoT devices makes a strong case for implementing RNN-based inference for applications such as acoustics-based authentication, voice commands, and edge analytics for smart homes. Nonetheless, the feasibility and performance of RNN-based inference on resource-constrained IoT devices remain largely unexplored. In this paper, we investigate the feasibility of using RNNs for an end-to-end authentication system based on breathing acoustics. We evaluate the performance of RNN models on three types of devices (smartphone, smartwatch, and Raspberry Pi) and show that, unlike CNN models, RNN models can be easily ported onto resource-constrained devices without a significant loss in accuracy.
Submitted 22 September, 2017; originally announced September 2017.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.07626v1-abstract-full').style.display = 'none'; document.getElementById('1709.07626v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1610.09044">arXiv:1610.09044</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1610.09044">pdf</a>, <a href="https://arxiv.org/format/1610.09044">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> BehavioCog: An Observation Resistant Authentication Scheme </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chauhan%2C+J">Jagmohan Chauhan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+B+Z+H">Benjamin Zi Hao Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Asghar%2C+H+J">Hassan Jameel Asghar</a>, <a href="/search/cs?searchtype=author&amp;query=Chan%2C+J">Jonathan Chan</a>, <a href="/search/cs?searchtype=author&amp;query=Kaafar%2C+M+A">Mohamed Ali Kaafar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1610.09044v2-abstract-short" style="display: inline;"> We propose that by integrating behavioural biometric gestures---such as drawing figures on a touch screen---with challenge-response based cognitive authentication schemes, we can benefit from the properties of both. On the one hand, we can improve the usability of existing cognitive schemes by significantly reducing the number of challenge-response rounds by (partially) relying on the hardness of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1610.09044v2-abstract-full').style.display = 'inline'; document.getElementById('1610.09044v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1610.09044v2-abstract-full" style="display: none;"> We propose that by integrating behavioural biometric gestures---such as drawing figures on a touch screen---with challenge-response based cognitive authentication schemes, we can benefit from the properties of both. On the one hand, we can improve the usability of existing cognitive schemes by significantly reducing the number of challenge-response rounds by (partially) relying on the hardness of mimicking carefully designed behavioural biometric gestures. On the other hand, the observation resistant property of cognitive schemes provides an extra layer of protection for behavioural biometrics; an attacker is unsure if a failed impersonation is due to a biometric failure or a wrong response to the challenge. We design and develop an instantiation of such a &#34;hybrid&#34; scheme, and call it BehavioCog. 
arXiv:1610.09044 [pdf, other] (https://arxiv.org/abs/1610.09044)
Subjects: cs.CR (Cryptography and Security)
Title: BehavioCog: An Observation Resistant Authentication Scheme
Authors: Jagmohan Chauhan, Benjamin Zi Hao Zhao, Hassan Jameel Asghar, Jonathan Chan, Mohamed Ali Kaafar
Abstract: We propose that by integrating behavioural biometric gestures, such as drawing figures on a touch screen, with challenge-response based cognitive authentication schemes, we can benefit from the properties of both. On the one hand, we can improve the usability of existing cognitive schemes by significantly reducing the number of challenge-response rounds, (partially) relying instead on the hardness of mimicking carefully designed behavioural biometric gestures. On the other hand, the observation-resistant property of cognitive schemes provides an extra layer of protection for behavioural biometrics: an attacker is unsure whether a failed impersonation is due to a biometric failure or a wrong response to the challenge. We design and develop an instantiation of such a "hybrid" scheme, and call it BehavioCog. To provide security close to a 4-digit PIN (a one in 10,000 chance to impersonate), we need only two challenge-response rounds, which can be completed in less than 38 seconds on average (as estimated in our user study), with the advantage that, unlike PINs or passwords, the scheme is secure under observation.
Submitted 12 March, 2017; v1 submitted 27 October, 2016; originally announced October 2016.
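The one-in-10,000 figure above follows directly from the round structure: if the two rounds are independent and a random responder passes each with probability about 1/100 (the per-round figure implied by the stated target; independence is assumed here for illustration, not stated in the abstract), the overall guessing probability matches a 4-digit PIN:

```python
# Illustrative arithmetic only; the per-round probability is inferred from
# the stated one-in-10,000 target, assuming independent rounds.
per_round = 1 / 100      # chance a random guess passes one round
print(per_round ** 2)    # ~1e-4: one in 10,000, i.e. 4-digit-PIN level
```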
arXiv:1608.04180 [pdf, other] (https://arxiv.org/abs/1608.04180)
Subjects: cs.CR (Cryptography and Security)
Title: Are wearable devices ready for HTTPS? Measuring the cost of secure communication protocols on wearable devices
Authors: Harini Kolamunna, Jagmohan Chauhan, Yining Hu, Kanchana Thilakarathna, Diego Perino, Dwight Makaroff, Aruna Seneviratne
Abstract: The majority of available wearable devices require communication with Internet servers for data analysis and storage, and rely on a paired smartphone to enable secure communication. However, wearable devices are mostly equipped with WiFi network interfaces, enabling direct communication with the Internet. Secure communication protocols should then run on the wearables themselves, yet it is not clear whether they can be efficiently supported. In this paper, we show that wearable devices are ready for direct and secure Internet communication by means of experiments with both controlled and Internet servers. We observe that the overall energy consumption and communication delay can be reduced with a direct Internet connection via WiFi from wearables, compared to using smartphones as relays via Bluetooth. We also show that the additional HTTPS cost caused by the TLS handshake and encryption is closely related to the number of parallel connections, and has the same relative impact on wearables and smartphones.
Submitted 12 December, 2016; v1 submitted 15 August, 2016; originally announced August 2016.
ACM Class: C.4; C.2.2; C.2.0
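The TLS-handshake cost discussed above can be isolated with a simple timing harness. The sketch below (the hostname is a placeholder and the methodology is a rough stand-in for the paper's testbed) separates TCP connect time from the added handshake time:

```python
# Sketch: time the TCP connect and the additional TLS handshake separately.
# Hostname and method are illustrative, not the paper's experimental setup.
import socket
import ssl
import time

host, port = "example.com", 443

t0 = time.perf_counter()
raw = socket.create_connection((host, port))
t1 = time.perf_counter()                       # TCP connection established

ctx = ssl.create_default_context()
tls = ctx.wrap_socket(raw, server_hostname=host)
t2 = time.perf_counter()                       # TLS handshake completed

print(f"TCP connect:   {(t1 - t0) * 1e3:.1f} ms")
print(f"TLS handshake: {(t2 - t1) * 1e3:.1f} ms ({tls.version()})")
tls.close()
```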
arXiv:1507.01677 [pdf, other] (https://arxiv.org/abs/1507.01677)
Subjects: cs.HC (Human-Computer Interaction); cs.CY (Computers and Society)
Title: The Web for Under-Powered Mobile Devices: Lessons learned from Google Glass
Authors: Jagmohan Chauhan, Mohamed Ali Kaafar, Anirban Mahanti
Abstract: This paper examines some of the potential challenges associated with enabling a seamless web experience on underpowered mobile devices such as Google Glass, from the perspective of web content providers, the device, and the network. We conducted experiments to study how webpage complexity, individual web components, and different application-layer protocols affect the performance of the Glass browser, measuring webpage load time, temperature variation, and power consumption, and compared the results to a smartphone. Our findings suggest that (a) the performance of Glass relative to a smartphone, in terms of power consumption and webpage load time, deteriorates with increasing webpage complexity; (b) execution time for popular JavaScript benchmarks is about 3-8 times higher on Glass than on a smartphone; (c) WebP is a more energy-efficient image format than JPEG and PNG; and (d) seven of the 50 websites studied are optimized for content delivery to Glass.
Submitted 27 November, 2015; v1 submitted 7 July, 2015; originally announced July 2015.
ACM Class: C.4
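Webpage load time, one of the metrics above, can be roughly proxied at the network level. The sketch below times only the HTML fetch, so it understates the full load time (no rendering, images, or JavaScript execution), and the URL is a placeholder:

```python
# Rough proxy for page load time: time-to-fetch of the HTML document only.
# Rendering and JavaScript execution, which dominate on devices like Glass,
# are not captured by this measurement.
import time
import urllib.request

url = "https://example.com/"   # placeholder target
t0 = time.perf_counter()
with urllib.request.urlopen(url) as resp:
    body = resp.read()
elapsed = time.perf_counter() - t0
print(f"fetched {len(body)} bytes in {elapsed * 1e3:.1f} ms")
```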
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> C.4 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1412.2855">arXiv:1412.2855</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1412.2855">pdf</a>, <a href="https://arxiv.org/format/1412.2855">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Gesture-based Continuous Authentication for Wearable Devices: the Google Glass Case </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chauhan%2C+J">Jagmohan Chauhan</a>, <a href="/search/cs?searchtype=author&amp;query=Asghar%2C+H+J">Hassan Jameel Asghar</a>, <a href="/search/cs?searchtype=author&amp;query=Kaafar%2C+M+A">Mohamed Ali Kaafar</a>, <a href="/search/cs?searchtype=author&amp;query=Mahanti%2C+A">Anirban Mahanti</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1412.2855v8-abstract-short" style="display: inline;"> We study the feasibility of touch gesture behavioural biometrics for implicit authentication of users on a smartglass (Google Glass) by proposing a continuous authentication system using two classifiers: SVM with RBF kernel, and a new classifier based on Chebyshev&#39;s concentration inequality. Based on data collected from 30 volunteers, we show that such authentication is feasible both in terms of c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1412.2855v8-abstract-full').style.display = 'inline'; document.getElementById('1412.2855v8-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1412.2855v8-abstract-full" style="display: none;"> We study the feasibility of touch gesture behavioural biometrics for implicit authentication of users on a smartglass (Google Glass) by proposing a continuous authentication system using two classifiers: SVM with RBF kernel, and a new classifier based on Chebyshev&#39;s concentration inequality. Based on data collected from 30 volunteers, we show that such authentication is feasible both in terms of classification accuracy and computational load on smartglasses. We achieve a classification accuracy of up to 99% with only 75 training samples using behavioural biometric data from four different types of touch gestures. To show that our system can be generalized, we test its performance on touch data from smartphones and found the accuracy to be similar to smartglasses. Finally, our experiments on the permanence of gestures show that the negative impact of changing user behaviour with time on classification accuracy can be best alleviated by periodically replacing older training samples with new randomly chosen samples. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1412.2855v8-abstract-full').style.display = 'none'; document.getElementById('1412.2855v8-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 May, 2016; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 December, 2014; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2014. </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 
512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10