Search | arXiv e-print repository
Showing 1–33 of 33 results for author: Spathis, D
Searching in archive cs.

1. arXiv:2502.07608 [pdf, other] - cs.LG (Machine Learning), cs.HC (Human-Computer Interaction)
   Title: Beyond Prompting: Time2Lang -- Bridging Time-Series Foundation Models and Large Language Models for Health Sensing
   Authors: Arvind Pillai, Dimitris Spathis, Subigya Nepal, Amanda C Collins, Daniel M Mackin, Michael V Heinz, Tess Z Griffin, Nicholas C Jacobson, Andrew Campbell
   Abstract: Large language models (LLMs) show promise for health applications when combined with behavioral sensing data. Traditional approaches convert sensor data into text prompts, but this process is prone to errors, computationally expensive, and requires domain expertise. These challenges are particularly acute when processing extended time series data. While time series foundation models (TFMs) have recently emerged as powerful tools for learning representations from temporal data, bridging TFMs and LLMs remains challenging. Here, we present Time2Lang, a framework that directly maps TFM outputs to LLM representations without intermediate text conversion. Our approach first trains on synthetic data using periodicity prediction as a pretext task, followed by evaluation on mental health classification tasks. We validate Time2Lang on two longitudinal wearable and mobile sensing datasets: daily depression prediction using step count data (17,251 days from 256 participants) and flourishing classification based on conversation duration (46 participants over 10 weeks). Time2Lang maintains near constant inference times regardless of input length, unlike traditional prompting methods. The generated embeddings preserve essential time-series characteristics such as auto-correlation. Our results demonstrate that TFMs and LLMs can be effectively integrated while minimizing information loss and enabling performance transfer across these distinct modeling paradigms. To our knowledge, we are the first to integrate a TFM and an LLM for health, thus establishing a foundation for future research combining general-purpose large models for complex healthcare tasks.
   Submitted: 11 February, 2025; v1 submitted 11 February, 2025; originally announced February 2025.
   Comments: 17 pages, 7 figures
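The abstract names the interface but not the implementation. A minimal sketch of the general recipe it describes, mapping a frozen TFM's embedding into an LLM's input space through a small adapter pre-trained on synthetic periodicity prediction, follows; the dimensions, adapter architecture, and stand-in encoder are illustrative assumptions, not the paper's design:

```python
import torch
import torch.nn as nn

TFM_DIM, LLM_DIM = 512, 4096  # assumed embedding sizes, not from the paper

class Time2LangAdapter(nn.Module):
    """Maps a TFM embedding into the LLM hidden space (hypothetical design)."""
    def __init__(self, tfm_dim=TFM_DIM, llm_dim=LLM_DIM):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Linear(tfm_dim, llm_dim), nn.GELU(), nn.Linear(llm_dim, llm_dim)
        )
        self.period_head = nn.Linear(llm_dim, 1)  # pretext task: predict the period

    def forward(self, tfm_embedding):
        z = self.proj(tfm_embedding)  # embedding in the LLM's input space
        return z, self.period_head(z).squeeze(-1)

def synthetic_batch(n=32, length=256):
    """Sine waves with random periods; the pretext target is the period."""
    periods = torch.randint(4, 64, (n,)).float()
    t = torch.arange(length).float()
    x = torch.sin(2 * torch.pi * t / periods[:, None])
    return x, periods

tfm = nn.Linear(256, TFM_DIM)  # stand-in for a frozen pre-trained TFM encoder
adapter = Time2LangAdapter()
opt = torch.optim.Adam(adapter.parameters(), lr=1e-3)

x, periods = synthetic_batch()
z, period_pred = adapter(tfm(x).detach())  # the TFM stays frozen
loss = nn.functional.mse_loss(period_pred, periods)
loss.backward()
opt.step()
# z could then be spliced into the LLM's input embedding sequence,
# skipping any text serialization of the raw series.
```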
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.23008">arXiv:2410.23008</a> <span> [<a href="https://arxiv.org/pdf/2410.23008">pdf</a>, <a href="https://arxiv.org/format/2410.23008">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> SoundCollage: Automated Discovery of New Classes in Audio Datasets </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Choi%2C+R">Ryuhaerang Choi</a>, <a href="/search/cs?searchtype=author&query=Chatterjee%2C+S">Soumyajit Chatterjee</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+S">Sung-Ju Lee</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a>, <a href="/search/cs?searchtype=author&query=Malekzadeh%2C+M">Mohammad Malekzadeh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.23008v2-abstract-short" style="display: inline;"> Developing new machine learning applications often requires the collection of new datasets. However, existing datasets may already contain relevant information to train models for new purposes. We propose SoundCollage: a framework to discover new classes within audio datasets by incorporating (1) an audio pre-processing pipeline to decompose different sounds in audio samples, and (2) an automated… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23008v2-abstract-full').style.display = 'inline'; document.getElementById('2410.23008v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.23008v2-abstract-full" style="display: none;"> Developing new machine learning applications often requires the collection of new datasets. However, existing datasets may already contain relevant information to train models for new purposes. We propose SoundCollage: a framework to discover new classes within audio datasets by incorporating (1) an audio pre-processing pipeline to decompose different sounds in audio samples, and (2) an automated model-based annotation mechanism to identify the discovered classes. Furthermore, we introduce the clarity measure to assess the coherence of the discovered classes for better training new downstream applications. Our evaluations show that the accuracy of downstream audio classifiers within discovered class samples and a held-out dataset improves over the baseline by up to 34.7% and 4.5%, respectively. These results highlight the potential of SoundCollage in making datasets reusable by labeling with newly discovered classes. To encourage further research in this area, we open-source our code at https://github.com/nokia-bell-labs/audio-class-discovery. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23008v2-abstract-full').style.display = 'none'; document.getElementById('2410.23008v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 2 figures. Accepted in IEEE ICASSP 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20542">arXiv:2410.20542</a> <span> [<a href="https://arxiv.org/pdf/2410.20542">pdf</a>, <a href="https://arxiv.org/format/2410.20542">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> PaPaGei: Open Foundation Models for Optical Physiological Signals </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Pillai%2C+A">Arvind Pillai</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a>, <a href="/search/cs?searchtype=author&query=Malekzadeh%2C+M">Mohammad Malekzadeh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20542v2-abstract-short" style="display: inline;"> Photoplethysmography (PPG) is the leading non-invasive technique for monitoring biosignals and cardiovascular health, with widespread adoption in both clinical settings and consumer wearable devices. While machine learning models trained on PPG signals have shown promise, they tend to be task-specific and struggle with generalization. Current research is limited by the use of single-device dataset… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20542v2-abstract-full').style.display = 'inline'; document.getElementById('2410.20542v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20542v2-abstract-full" style="display: none;"> Photoplethysmography (PPG) is the leading non-invasive technique for monitoring biosignals and cardiovascular health, with widespread adoption in both clinical settings and consumer wearable devices. While machine learning models trained on PPG signals have shown promise, they tend to be task-specific and struggle with generalization. Current research is limited by the use of single-device datasets, insufficient exploration of out-of-domain generalization, and a lack of publicly available models, which hampers reproducibility. To address these limitations, we present PaPaGei, the first open foundation model for PPG signals. The model is pre-trained on over 57,000 hours of data, comprising 20 million unlabeled PPG segments from publicly available datasets. 
We introduce a novel representation learning approach that leverages domain knowledge of PPG signal morphology across individuals, enabling the capture of richer representations compared to traditional contrastive learning methods. We evaluate PaPaGei against state-of-the-art time-series foundation models and self-supervised learning benchmarks across 20 tasks from 10 diverse datasets, spanning cardiovascular health, sleep disorders, pregnancy monitoring, and wellbeing assessment. Our model demonstrates superior performance, improving classification and regression metrics by 6.3% and 2.9% respectively in at least 14 tasks. Notably, PaPaGei achieves these results while being more data- and parameter-efficient, outperforming models that are 70x larger. Beyond accuracy, we examine model robustness across different skin tones, establishing a benchmark for bias evaluation in future models. PaPaGei can serve as both a feature extractor and an encoder for multimodal models, opening up new opportunities for multimodal health monitoring. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20542v2-abstract-full').style.display = 'none'; document.getElementById('2410.20542v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at ICLR 2025. Improved version with new experiments and results. Code and models: https://github.com/nokia-bell-labs/papagei-foundation-model</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.10048">arXiv:2410.10048</a> <span> [<a href="https://arxiv.org/pdf/2410.10048">pdf</a>, <a href="https://arxiv.org/format/2410.10048">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> StatioCL: Contrastive Learning for Time Series via Non-Stationary and Temporal Contrast </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yu Wu</a>, <a href="/search/cs?searchtype=author&query=Dang%2C+T">Ting Dang</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Jia%2C+H">Hong Jia</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.10048v1-abstract-short" style="display: inline;"> Contrastive learning (CL) has emerged as a promising approach for representation learning in time series data by embedding similar pairs closely while distancing dissimilar ones. 
However, existing CL methods often introduce false negative pairs (FNPs) by neglecting inherent characteristics and then randomly selecting distinct segments as dissimilar pairs, leading to erroneous representation learni… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10048v1-abstract-full').style.display = 'inline'; document.getElementById('2410.10048v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.10048v1-abstract-full" style="display: none;"> Contrastive learning (CL) has emerged as a promising approach for representation learning in time series data by embedding similar pairs closely while distancing dissimilar ones. However, existing CL methods often introduce false negative pairs (FNPs) by neglecting inherent characteristics and then randomly selecting distinct segments as dissimilar pairs, leading to erroneous representation learning, reduced model performance, and overall inefficiency. To address these issues, we systematically define and categorize FNPs in time series into semantic false negative pairs and temporal false negative pairs for the first time: the former arising from overlooking similarities in label categories, which correlates with similarities in non-stationarity and the latter from neglecting temporal proximity. Moreover, we introduce StatioCL, a novel CL framework that captures non-stationarity and temporal dependency to mitigate both FNPs and rectify the inaccuracies in learned representations. By interpreting and differentiating non-stationary states, which reflect the correlation between trends or temporal dynamics with underlying data patterns, StatioCL effectively captures the semantic characteristics and eliminates semantic FNPs. Simultaneously, StatioCL establishes fine-grained similarity levels based on temporal dependencies to capture varying temporal proximity between segments and to mitigate temporal FNPs. Evaluated on real-world benchmark time series classification datasets, StatioCL demonstrates a substantial improvement over state-of-the-art CL methods, achieving a 2.9% increase in Recall and a 19.2% reduction in FNPs. Most importantly, StatioCL also shows enhanced data efficiency and robustness against label scarcity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10048v1-abstract-full').style.display = 'none'; document.getElementById('2410.10048v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
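A toy illustration of the general FNP-mitigation idea this abstract sketches, not StatioCL's actual formulation: before treating other in-batch segments as negatives in an InfoNCE loss, exclude candidates that are temporally close to the anchor or share a similar non-stationarity statistic. The rolling-variance proxy and both thresholds are assumptions:

```python
import torch
import torch.nn.functional as F

def nonstationarity_stat(x, win=16):
    """Crude proxy: variance of windowed means; stationary series score low."""
    return x.unfold(-1, win, win).mean(-1).var(dim=-1)

def contrastive_loss(z1, z2, t_idx, stat, tau=0.1, t_gap=5, stat_gap=0.1):
    """InfoNCE over two augmented views, excluding likely false negatives."""
    z1, z2 = F.normalize(z1, dim=-1), F.normalize(z2, dim=-1)
    logits = z1 @ z2.T / tau  # (B, B); the diagonal holds the true positives
    # suspect negatives: close in time, or similar non-stationarity profile
    likely_fnp = ((t_idx[:, None] - t_idx[None, :]).abs() < t_gap) | \
                 ((stat[:, None] - stat[None, :]).abs() < stat_gap)
    likely_fnp.fill_diagonal_(False)  # never mask the positive pair
    logits = logits.masked_fill(likely_fnp, float("-inf"))
    return F.cross_entropy(logits, torch.arange(len(z1)))

x = torch.randn(8, 128)                          # raw segments (batch, length)
z1, z2 = torch.randn(8, 64), torch.randn(8, 64)  # embeddings of two augmented views
loss = contrastive_loss(z1, z2, t_idx=torch.arange(8), stat=nonstationarity_stat(x))
```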
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in CIKM24</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.02361">arXiv:2406.02361</a> <span> [<a href="https://arxiv.org/pdf/2406.02361">pdf</a>, <a href="https://arxiv.org/format/2406.02361">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Using Self-supervised Learning Can Improve Model Fairness </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yfantidou%2C+S">Sofia Yfantidou</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Constantinides%2C+M">Marios Constantinides</a>, <a href="/search/cs?searchtype=author&query=Vakali%2C+A">Athena Vakali</a>, <a href="/search/cs?searchtype=author&query=Quercia%2C+D">Daniele Quercia</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.02361v1-abstract-short" style="display: inline;"> Self-supervised learning (SSL) has become the de facto training paradigm of large models, where pre-training is followed by supervised fine-tuning using domain-specific data and labels. Despite demonstrating comparable performance with supervised methods, comprehensive efforts to assess SSL's impact on machine learning fairness (i.e., performing equally on different demographic breakdowns) are lac… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.02361v1-abstract-full').style.display = 'inline'; document.getElementById('2406.02361v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.02361v1-abstract-full" style="display: none;"> Self-supervised learning (SSL) has become the de facto training paradigm of large models, where pre-training is followed by supervised fine-tuning using domain-specific data and labels. Despite demonstrating comparable performance with supervised methods, comprehensive efforts to assess SSL's impact on machine learning fairness (i.e., performing equally on different demographic breakdowns) are lacking. Hypothesizing that SSL models would learn more generic, hence less biased representations, this study explores the impact of pre-training and fine-tuning strategies on fairness. We introduce a fairness assessment framework for SSL, comprising five stages: defining dataset requirements, pre-training, fine-tuning with gradual unfreezing, assessing representation similarity conditioned on demographics, and establishing domain-specific evaluation processes. We evaluate our method's generalizability on three real-world human-centric datasets (i.e., MIMIC, MESA, and GLOBEM) by systematically comparing hundreds of SSL and fine-tuned models on various dimensions spanning from the intermediate representations to appropriate evaluation metrics. 
Our findings demonstrate that SSL can significantly improve model fairness, while maintaining performance on par with supervised methods-exhibiting up to a 30% increase in fairness with minimal loss in performance through self-supervision. We posit that such differences can be attributed to representation dissimilarities found between the best- and the worst-performing demographics across models-up to x13 greater for protected attributes with larger performance discrepancies between segments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.02361v1-abstract-full').style.display = 'none'; document.getElementById('2406.02361v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2401.01640</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.10561">arXiv:2403.10561</a> <span> </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> A collection of the accepted papers for the Human-Centric Representation Learning workshop at AAAI 2024 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Saeed%2C+A">Aaqib Saeed</a>, <a href="/search/cs?searchtype=author&query=Etemad%2C+A">Ali Etemad</a>, <a href="/search/cs?searchtype=author&query=Tonekaboni%2C+S">Sana Tonekaboni</a>, <a href="/search/cs?searchtype=author&query=Laskaridis%2C+S">Stefanos Laskaridis</a>, <a href="/search/cs?searchtype=author&query=Deldari%2C+S">Shohreh Deldari</a>, <a href="/search/cs?searchtype=author&query=Tang%2C+C+I">Chi Ian Tang</a>, <a href="/search/cs?searchtype=author&query=Schwab%2C+P">Patrick Schwab</a>, <a href="/search/cs?searchtype=author&query=Tailor%2C+S">Shyam Tailor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.10561v1-abstract-short" style="display: inline;"> This non-archival index is not complete, as some accepted papers chose to opt-out of inclusion. The list of all accepted papers is available on the workshop website. </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.10561v1-abstract-full" style="display: none;"> This non-archival index is not complete, as some accepted papers chose to opt-out of inclusion. The list of all accepted papers is available on the workshop website. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.10561v1-abstract-full').style.display = 'none'; document.getElementById('2403.10561v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.14107">arXiv:2401.14107</a> <span> [<a href="https://arxiv.org/pdf/2401.14107">pdf</a>, <a href="https://arxiv.org/format/2401.14107">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Learning under Label Noise through Few-Shot Human-in-the-Loop Refinement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Saeed%2C+A">Aaqib Saeed</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Oh%2C+J">Jungwoo Oh</a>, <a href="/search/cs?searchtype=author&query=Choi%2C+E">Edward Choi</a>, <a href="/search/cs?searchtype=author&query=Etemad%2C+A">Ali Etemad</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.14107v1-abstract-short" style="display: inline;"> Wearable technologies enable continuous monitoring of various health metrics, such as physical activity, heart rate, sleep, and stress levels. A key challenge with wearable data is obtaining quality labels. Unlike modalities like video where the videos themselves can be effectively used to label objects or events, wearable data do not contain obvious cues about the physical manifestation of the us… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.14107v1-abstract-full').style.display = 'inline'; document.getElementById('2401.14107v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.14107v1-abstract-full" style="display: none;"> Wearable technologies enable continuous monitoring of various health metrics, such as physical activity, heart rate, sleep, and stress levels. A key challenge with wearable data is obtaining quality labels. Unlike modalities like video where the videos themselves can be effectively used to label objects or events, wearable data do not contain obvious cues about the physical manifestation of the users and usually require rich metadata. As a result, label noise can become an increasingly thorny issue when labeling such data. In this paper, we propose a novel solution to address noisy label learning, entitled Few-Shot Human-in-the-Loop Refinement (FHLR). Our method initially learns a seed model using weak labels. Next, it fine-tunes the seed model using a handful of expert corrections. Finally, it achieves better generalizability and robustness by merging the seed and fine-tuned models via weighted parameter averaging. 
8. arXiv:2401.02255 [pdf, other] - cs.LG (Machine Learning), eess.SP (Signal Processing)
   Title: Balancing Continual Learning and Fine-tuning for Human Activity Recognition
   Authors: Chi Ian Tang, Lorena Qendro, Dimitris Spathis, Fahim Kawsar, Akhil Mathur, Cecilia Mascolo
   Abstract: Wearable-based Human Activity Recognition (HAR) is a key task in human-centric machine learning due to its fundamental understanding of human behaviours. Due to the dynamic nature of human behaviours, continual learning promises HAR systems that are tailored to users' needs. However, because of the difficulty in collecting labelled data with wearable sensors, existing approaches that focus on supervised continual learning have limited applicability, while unsupervised continual learning methods only handle representation learning while delaying classifier training to a later stage. This work explores the adoption and adaptation of CaSSLe, a continual self-supervised learning model, and Kaizen, a semi-supervised continual learning model that balances representation learning and down-stream classification, for the task of wearable-based HAR. These schemes re-purpose contrastive learning for knowledge retention, and Kaizen combines that with self-training in a unified scheme that can leverage unlabelled and labelled data for continual learning. In addition to comparing state-of-the-art self-supervised continual learning schemes, we further investigated the importance of different loss terms and explored the trade-off between knowledge retention and learning from new tasks. In particular, our extensive evaluation demonstrated that the use of a weighting factor that reflects the ratio between learned and new classes achieves the best overall trade-off in continual learning.
   Submitted: 4 January, 2024; originally announced January 2024.
   Comments: AAAI 2024 HCRL (Human-Centric Representation Learning) Workshop
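A sketch of the weighting the authors report working best, scaling knowledge retention against new-task learning by the ratio of learned to new classes; the concrete loss terms in CaSSLe and Kaizen differ, so this only illustrates the weighting itself:

```python
def continual_loss(retention_loss, new_task_loss, n_old_classes, n_new_classes):
    """Weight knowledge retention by the share of classes already learned."""
    w = n_old_classes / (n_old_classes + n_new_classes)
    return w * retention_loss + (1.0 - w) * new_task_loss

# e.g. with 6 learned and 2 new classes, retention gets weight 0.75
loss = continual_loss(retention_loss=1.2, new_task_loss=0.8,
                      n_old_classes=6, n_new_classes=2)
```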
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">AAAI 2024 HCRL (Human-Centric Representation Learning) Workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.01640">arXiv:2401.01640</a> <span> [<a href="https://arxiv.org/pdf/2401.01640">pdf</a>, <a href="https://arxiv.org/format/2401.01640">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Evaluating Fairness in Self-supervised and Supervised Models for Sequential Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yfantidou%2C+S">Sofia Yfantidou</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Constantinides%2C+M">Marios Constantinides</a>, <a href="/search/cs?searchtype=author&query=Vakali%2C+A">Athena Vakali</a>, <a href="/search/cs?searchtype=author&query=Quercia%2C+D">Daniele Quercia</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.01640v1-abstract-short" style="display: inline;"> Self-supervised learning (SSL) has become the de facto training paradigm of large models where pre-training is followed by supervised fine-tuning using domain-specific data and labels. Hypothesizing that SSL models would learn more generic, hence less biased, representations, this study explores the impact of pre-training and fine-tuning strategies on fairness (i.e., performing equally on differen… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.01640v1-abstract-full').style.display = 'inline'; document.getElementById('2401.01640v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.01640v1-abstract-full" style="display: none;"> Self-supervised learning (SSL) has become the de facto training paradigm of large models where pre-training is followed by supervised fine-tuning using domain-specific data and labels. Hypothesizing that SSL models would learn more generic, hence less biased, representations, this study explores the impact of pre-training and fine-tuning strategies on fairness (i.e., performing equally on different demographic breakdowns). Motivated by human-centric applications on real-world timeseries data, we interpret inductive biases on the model, layer, and metric levels by systematically comparing SSL models to their supervised counterparts. Our findings demonstrate that SSL has the capacity to achieve performance on par with supervised methods while significantly enhancing fairness--exhibiting up to a 27% increase in fairness with a mere 1% loss in performance through self-supervision. Ultimately, this work underscores SSL's potential in human-centric computing, particularly high-stakes, data-scarce application domains like healthcare. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.01640v1-abstract-full').style.display = 'none'; document.getElementById('2401.01640v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Paper accepted in Human-Centric Representation Learning workshop at AAAI 2024 (https://hcrl-workshop.github.io/2024/)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.12877">arXiv:2309.12877</a> <span> [<a href="https://arxiv.org/pdf/2309.12877">pdf</a>, <a href="https://arxiv.org/ps/2309.12877">ps</a>, <a href="https://arxiv.org/format/2309.12877">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3594739.3605107">10.1145/3594739.3605107 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> FairComp: Workshop on Fairness and Robustness in Machine Learning for Ubiquitous Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yfantidou%2C+S">Sofia Yfantidou</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Constantinides%2C+M">Marios Constantinides</a>, <a href="/search/cs?searchtype=author&query=Xia%2C+T">Tong Xia</a>, <a href="/search/cs?searchtype=author&query=van+Berkel%2C+N">Niels van Berkel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.12877v1-abstract-short" style="display: inline;"> How can we ensure that Ubiquitous Computing (UbiComp) research outcomes are both ethical and fair? While fairness in machine learning (ML) has gained traction in recent years, fairness in UbiComp remains unexplored. This workshop aims to discuss fairness in UbiComp research and its social, technical, and legal implications. From a social perspective, we will examine the relationship between fairne… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.12877v1-abstract-full').style.display = 'inline'; document.getElementById('2309.12877v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.12877v1-abstract-full" style="display: none;"> How can we ensure that Ubiquitous Computing (UbiComp) research outcomes are both ethical and fair? 
While fairness in machine learning (ML) has gained traction in recent years, fairness in UbiComp remains unexplored. This workshop aims to discuss fairness in UbiComp research and its social, technical, and legal implications. From a social perspective, we will examine the relationship between fairness and UbiComp research and identify pathways to ensure that ubiquitous technologies do not cause harm or infringe on individual rights. From a technical perspective, we will initiate a discussion on data practices to develop bias mitigation approaches tailored to UbiComp research. From a legal perspective, we will examine how new policies shape our community's work and future research. We aim to foster a vibrant community centered around the topic of responsible UbiComp, while also charting a clear path for future research endeavours in this field. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.12877v1-abstract-full').style.display = 'none'; document.getElementById('2309.12877v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Adjunct Proceedings of the 2023 ACM International Joint Conference on Pervasive and Ubiquitous Computing & the 2023 ACM International Symposium on Wearable Computing (UbiComp/ISWC '23 Adjunct ) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.06236">arXiv:2309.06236</a> <span> [<a href="https://arxiv.org/pdf/2309.06236">pdf</a>, <a href="https://arxiv.org/format/2309.06236">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> The first step is the hardest: Pitfalls of Representing and Tokenizing Temporal Data for Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.06236v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) have demonstrated remarkable generalization across diverse tasks, leading individuals to increasingly use them as personal assistants and universal computing engines. Nevertheless, a notable obstacle emerges when feeding numerical/temporal data into these models, such as data sourced from wearables or electronic health records. 
LLMs employ tokenizers in their input tha… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.06236v1-abstract-full').style.display = 'inline'; document.getElementById('2309.06236v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.06236v1-abstract-full" style="display: none;"> Large Language Models (LLMs) have demonstrated remarkable generalization across diverse tasks, leading individuals to increasingly use them as personal assistants and universal computing engines. Nevertheless, a notable obstacle emerges when feeding numerical/temporal data into these models, such as data sourced from wearables or electronic health records. LLMs employ tokenizers in their input that break down text into smaller units. However, tokenizers are not designed to represent numerical values and might struggle to understand repetitive patterns and context, treating consecutive values as separate tokens and disregarding their temporal relationships. Here, we discuss recent works that employ LLMs for human-centric tasks such as in mobile health sensing and present a case study showing that popular LLMs tokenize temporal data incorrectly. To address that, we highlight potential solutions such as prompt tuning with lightweight embedding layers as well as multimodal adapters, that can help bridge this "modality gap". While the capability of language models to generalize to other modalities with minimal or no finetuning is exciting, this paper underscores the fact that their outputs cannot be meaningful if they stumble over input nuances. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.06236v1-abstract-full').style.display = 'none'; document.getElementById('2309.06236v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. 
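The tokenization pitfall is easy to reproduce. The snippet below, using the GPT-2 BPE tokenizer from Hugging Face transformers (the paper's case study may use different models and inputs), prints how a numeric sequence is split:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
series = "1712 1713 1714 1715"  # e.g. step counts rendered as a text prompt
print(tok.tokenize(series))
# Multi-digit values are often split into uneven BPE chunks, and the chunking
# can change with position because a leading space alters the token, so the
# model sees no consistent symbol for "the same reading one step later".
```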
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at the Generative AI for Pervasive Computing Symposium (GenAI4PC) at UbiComp 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.16847">arXiv:2307.16847</a> <span> [<a href="https://arxiv.org/pdf/2307.16847">pdf</a>, <a href="https://arxiv.org/format/2307.16847">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> CroSSL: Cross-modal Self-Supervised Learning for Time-series through Latent Masking </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Deldari%2C+S">Shohreh Deldari</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Malekzadeh%2C+M">Mohammad Malekzadeh</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a>, <a href="/search/cs?searchtype=author&query=Salim%2C+F">Flora Salim</a>, <a href="/search/cs?searchtype=author&query=Mathur%2C+A">Akhil Mathur</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.16847v3-abstract-short" style="display: inline;"> Limited availability of labeled data for machine learning on multimodal time-series extensively hampers progress in the field. Self-supervised learning (SSL) is a promising approach to learning data representations without relying on labels. However, existing SSL methods require expensive computations of negative pairs and are typically designed for single modalities, which limits their versatilit… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.16847v3-abstract-full').style.display = 'inline'; document.getElementById('2307.16847v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.16847v3-abstract-full" style="display: none;"> Limited availability of labeled data for machine learning on multimodal time-series extensively hampers progress in the field. Self-supervised learning (SSL) is a promising approach to learning data representations without relying on labels. However, existing SSL methods require expensive computations of negative pairs and are typically designed for single modalities, which limits their versatility. We introduce CroSSL (Cross-modal SSL), which puts forward two novel concepts: masking intermediate embeddings produced by modality-specific encoders, and their aggregation into a global embedding through a cross-modal aggregator that can be fed to down-stream classifiers. CroSSL allows for handling missing modalities and end-to-end cross-modal learning without requiring prior data preprocessing for handling missing inputs or negative-pair sampling for contrastive learning. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.16651">arXiv:2307.16651</a> <span> [<a href="https://arxiv.org/pdf/2307.16651">pdf</a>, <a href="https://arxiv.org/format/2307.16651">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> UDAMA: Unsupervised Domain Adaptation through Multi-discriminator Adversarial Training with Noisy Labels Improves Cardio-fitness Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yu Wu</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Jia%2C+H">Hong Jia</a>, <a href="/search/cs?searchtype=author&query=Perez-Pozuelo%2C+I">Ignacio Perez-Pozuelo</a>, <a href="/search/cs?searchtype=author&query=Gonzales%2C+T">Tomas Gonzales</a>, <a href="/search/cs?searchtype=author&query=Brage%2C+S">Soren Brage</a>, <a href="/search/cs?searchtype=author&query=Wareham%2C+N">Nicholas Wareham</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: Deep learning models have shown great promise in various healthcare monitoring applications. However, most healthcare datasets with high-quality (gold-standard) labels are small-scale, as directly collecting ground truth is often costly and time-consuming.
As a result, models developed and validated on small-scale datasets often suffer from overfitting and do not generalize well to unseen scenarios. At the same time, large amounts of imprecise (silver-standard) labeled data, annotated by approximate methods with the help of modern wearables and in the absence of ground truth validation, are starting to emerge. However, due to measurement differences, this data displays significant label distribution shifts, which motivates the use of domain adaptation. To this end, we introduce UDAMA, a method with two key components: Unsupervised Domain Adaptation and Multi-discriminator Adversarial Training, where we pre-train on the silver-standard data and employ adversarial adaptation with the gold-standard data along with two domain discriminators. In particular, we showcase the practical potential of UDAMA by applying it to cardio-respiratory fitness (CRF) prediction. CRF is a crucial determinant of metabolic disease and mortality, and it presents labels with various levels of noise (gold- and silver-standard), making it challenging to establish an accurate prediction model. Our results show promising performance by alleviating distribution shifts in various label shift settings. Additionally, by using data from two free-living cohort studies (Fenland and BBVS), we show that UDAMA consistently outperforms competitive transfer learning and state-of-the-art domain adaptation models by up to 12%, paving the way for leveraging noisy labeled data to improve fitness estimation at scale. </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at Machine Learning for Healthcare (MLHC) 2023</span> </p> </li>
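<p class="is-size-7">A compressed PyTorch sketch of the UDAMA recipe the abstract above describes: pre-train a predictor on silver-standard labels, then adapt with gold-standard data and two domain discriminators. The architectures and loss wiring here are placeholders inferred from the abstract, not the authors' code.</p> <pre><code class="language-python">
import torch
import torch.nn as nn

feature_extractor = nn.Sequential(nn.Linear(32, 64), nn.ReLU())
predictor = nn.Linear(64, 1)    # e.g., fitness (VO2max) regression head
disc_silver = nn.Linear(64, 1)  # discriminator 1: silver-standard domain
disc_gold = nn.Linear(64, 1)    # discriminator 2: gold-standard domain

bce = nn.BCEWithLogitsLoss()

def discriminator_step(x_silver, x_gold):
    # Each discriminator learns to tell its domain apart; the feature
    # extractor is trained in an alternating step to fool both, aligning
    # the noisy-label and clean-label domains in feature space.
    z_s, z_g = feature_extractor(x_silver), feature_extractor(x_gold)
    return (bce(disc_silver(z_s.detach()), torch.ones(len(z_s), 1)) +
            bce(disc_silver(z_g.detach()), torch.zeros(len(z_g), 1)) +
            bce(disc_gold(z_g.detach()), torch.ones(len(z_g), 1)) +
            bce(disc_gold(z_s.detach()), torch.zeros(len(z_s), 1)))

print(discriminator_step(torch.randn(16, 32), torch.randn(16, 32)))
</code></pre>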
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.12075">arXiv:2307.12075</a> <span> [<a href="https://arxiv.org/pdf/2307.12075">pdf</a>, <a href="https://arxiv.org/format/2307.12075">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3565066.3608685">10.1145/3565066.3608685</a></span> </div> </div> </div> <p class="title is-5 mathjax"> The State of Algorithmic Fairness in Mobile Human-Computer Interaction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yfantidou%2C+S">Sofia Yfantidou</a>, <a href="/search/cs?searchtype=author&query=Constantinides%2C+M">Marios Constantinides</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Vakali%2C+A">Athena Vakali</a>, <a href="/search/cs?searchtype=author&query=Quercia%2C+D">Daniele Quercia</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: This paper explores the intersection of Artificial Intelligence and Machine Learning (AI/ML) fairness and mobile human-computer interaction (MobileHCI). Through a comprehensive analysis of MobileHCI proceedings published between 2017 and 2022, we first aim to understand the current state of algorithmic fairness in the community. By manually analyzing 90 papers, we found that only a small portion (5%) thereof adheres to modern fairness reporting, such as analyses conditioned on demographic breakdowns. At the same time, the overwhelming majority draws its findings from highly-educated, employed, and Western populations. We situate these findings within recent efforts to capture the current state of algorithmic fairness in mobile and wearable computing, and envision that our results will serve as an open invitation to the design and development of fairer ubiquitous technologies. </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2303.15585</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 25th International Conference on Mobile Human-Computer Interaction (MobileHCI '23 Companion), September 26--29, 2023, Athens, Greece </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.17235">arXiv:2303.17235</a> <span> [<a href="https://arxiv.org/pdf/2303.17235">pdf</a>, <a href="https://arxiv.org/format/2303.17235">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Kaizen: Practical Self-supervised Continual Learning with Continual Fine-tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tang%2C+C+I">Chi Ian Tang</a>, <a href="/search/cs?searchtype=author&query=Qendro%2C+L">Lorena Qendro</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a>, <a href="/search/cs?searchtype=author&query=Mathur%2C+A">Akhil Mathur</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: Self-supervised learning (SSL) has shown remarkable performance in computer vision tasks when trained offline. However, in a Continual Learning (CL) scenario where new data is introduced progressively, models still suffer from catastrophic forgetting. Retraining a model from scratch to adapt to newly generated data is time-consuming and inefficient. Previous approaches suggested re-purposing self-supervised objectives with knowledge distillation to mitigate forgetting across tasks, assuming that labels from all tasks are available during fine-tuning.
In this paper, we generalize self-supervised continual learning in a practical setting where available labels can be leveraged in any step of the SSL process. With an increasing number of continual tasks, this offers more flexibility in the pre-training and fine-tuning phases. With Kaizen, we introduce a training architecture that is able to mitigate catastrophic forgetting for both the feature extractor and classifier with a carefully designed loss function. By using a set of comprehensive evaluation metrics reflecting different aspects of continual learning, we demonstrated that Kaizen significantly outperforms previous SSL models in competitive vision benchmarks, with up to 16.5% accuracy improvement on split CIFAR-100. Kaizen is able to balance the trade-off between knowledge retention and learning from new data with an end-to-end model, paving the way for practical deployment of continual learning systems. </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented at IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) 2024. The code for this work is available at https://github.com/dr-bell/kaizen</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2024, pp. 2841-2850 </p> </li>
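<p class="is-size-7">A schematic PyTorch loss for the Kaizen idea above: keep a frozen copy of the previous model as a teacher and distill both the feature extractor and the classifier while learning from new data. The loss terms and weighting are illustrative guesses, not the paper's exact formulation (see https://github.com/dr-bell/kaizen).</p> <pre><code class="language-python">
import torch.nn.functional as F

def kaizen_style_loss(z_new, z_teacher, logits, logits_teacher, labels, alpha=0.5):
    # Feature-extractor retention: match the frozen teacher's embeddings.
    ssl_retention = F.mse_loss(z_new, z_teacher.detach())
    # Learn the current task from labeled data.
    cls_current = F.cross_entropy(logits, labels)
    # Classifier retention: distill the teacher's output distribution.
    cls_retention = F.kl_div(F.log_softmax(logits, dim=-1),
                             F.softmax(logits_teacher.detach(), dim=-1),
                             reduction="batchmean")
    # alpha trades off knowledge retention against learning from new data.
    return cls_current + alpha * (ssl_retention + cls_retention)
</code></pre>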
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.15585">arXiv:2303.15585</a> <span> [<a href="https://arxiv.org/pdf/2303.15585">pdf</a>, <a href="https://arxiv.org/format/2303.15585">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Beyond Accuracy: A Critical Review of Fairness in Machine Learning for Mobile and Wearable Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yfantidou%2C+S">Sofia Yfantidou</a>, <a href="/search/cs?searchtype=author&query=Constantinides%2C+M">Marios Constantinides</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Vakali%2C+A">Athena Vakali</a>, <a href="/search/cs?searchtype=author&query=Quercia%2C+D">Daniele Quercia</a>, <a href="/search/cs?searchtype=author&query=Kawsar%2C+F">Fahim Kawsar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: The field of mobile and wearable computing is undergoing a revolutionary integration of machine learning. Devices can now diagnose diseases, predict heart irregularities, and unlock the full potential of human cognition. However, the underlying algorithms powering these predictions are not immune to biases with respect to sensitive attributes (e.g., gender, race), leading to discriminatory outcomes. The goal of this work is to explore the extent to which the mobile and wearable computing community has adopted ways of reporting information about datasets and models to surface and, eventually, counter biases. Our systematic review of papers published in the Proceedings of the ACM Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT) journal from 2018-2022 indicates that, while there has been progress made on algorithmic fairness, there is still ample room for growth. Our findings show that only a small portion (5%) of published papers adheres to modern fairness reporting, while the overwhelming majority thereof focuses on accuracy or error metrics.
To generalize these results across venues of similar scope, we analyzed recent proceedings of ACM MobiCom, MobiSys, and SenSys, IEEE Pervasive, and IEEE Transactions on Mobile Computing, and found no deviation from our primary result. In light of these findings, our work provides practical guidelines for the design and development of mobile and wearable technologies that not only strive for accuracy but also fairness. </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.10475">arXiv:2211.10475</a> <span> [<a href="https://arxiv.org/pdf/2211.10475">pdf</a>, <a href="https://arxiv.org/format/2211.10475">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Turning Silver into Gold: Domain Adaptation with Noisy Labels for Wearable Cardio-Respiratory Fitness Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yu Wu</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Jia%2C+H">Hong Jia</a>, <a href="/search/cs?searchtype=author&query=Perez-Pozuelo%2C+I">Ignacio Perez-Pozuelo</a>, <a href="/search/cs?searchtype=author&query=Gonzales%2C+T+I">Tomas I. Gonzales</a>, <a href="/search/cs?searchtype=author&query=Brage%2C+S">Soren Brage</a>, <a href="/search/cs?searchtype=author&query=Wareham%2C+N">Nicholas Wareham</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: Deep learning models have shown great promise in various healthcare applications.
However, most models are developed and validated on small-scale datasets, as collecting high-quality (gold-standard) labels for health applications is often costly and time-consuming. As a result, these models may suffer from overfitting and not generalize well to unseen data. At the same time, an extensive amount of data with imprecise labels (silver-standard) is starting to be generally available, as collected from inexpensive wearables like accelerometers and electrocardiography sensors. These currently underutilized datasets and labels can be leveraged to produce more accurate clinical models. In this work, we propose UDAMA, a novel model with two key components: Unsupervised Domain Adaptation and Multi-discriminator Adversarial training, which leverages noisy data from the source domain (the silver-standard dataset) to improve gold-standard modeling. We validate our framework on the challenging task of predicting lab-measured maximal oxygen consumption (VO$_{2}$max), the benchmark metric of cardio-respiratory fitness, using free-living wearable sensor data from two cohort studies as inputs. Our experiments show that the proposed framework achieves the best performance of corr = 0.665 $\pm$ 0.04, paving the way for accurate fitness estimation at scale. </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extended Abstract presented at Machine Learning for Health (ML4H) symposium 2022, November 28th, 2022, New Orleans, United States & Virtual, http://www.ml4h.cc, 5 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.13398">arXiv:2205.13398</a> <span> [<a href="https://arxiv.org/pdf/2205.13398">pdf</a>, <a href="https://arxiv.org/format/2205.13398">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Looking for Out-of-Distribution Environments in Multi-center Critical Care Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Hyland%2C+S+L">Stephanie L. Hyland</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: Clinical machine learning models show a significant performance drop when tested in settings not seen during training. Domain generalisation models promise to alleviate this problem; however, there is still scepticism about whether they improve over traditional training.
In this work, we take a principled approach to identifying Out of Distribution (OoD) environments, motivated by the problem of cross-hospital generalization in critical care. We propose model-based and heuristic approaches to identify OoD environments and systematically compare models with different levels of held-out information. We find that access to OoD data does not translate to increased performance, pointing to inherent limitations in defining potential OoD environments, possibly due to data harmonisation and sampling. Echoing similar results with other popular clinical benchmarks in the literature, new approaches are required to evaluate robust models on health records. </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extended Abstract presented at Machine Learning for Health (ML4H) symposium 2022, November 28th, 2022, New Orleans, United States & Virtual, http://www.ml4h.cc, 17 pages</span> </p> </li>
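<p class="is-size-7">An illustrative leave-one-environment-out check in scikit-learn, in the spirit of the cross-hospital setup above. The hospitals, features, and logistic-regression model are synthetic stand-ins, not the paper's pipeline.</p> <pre><code class="language-python">
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
X = rng.normal(size=(1200, 10))
y = (X[:, 0] + rng.normal(scale=0.5, size=1200) > 0).astype(int)
hospital = rng.integers(0, 4, size=1200)   # 4 synthetic "environments"

for h in range(4):
    train, test = hospital != h, hospital == h
    model = LogisticRegression().fit(X[train], y[train])
    auc = roc_auc_score(y[test], model.predict_proba(X[test])[:, 1])
    print(f"held-out hospital {h}: AUROC = {auc:.3f}")
# Comparing these held-out scores against ordinary in-distribution
# cross-validation indicates whether any environment behaves as
# meaningfully out-of-distribution.
</code></pre>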
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.03116">arXiv:2205.03116</a> <span> [<a href="https://arxiv.org/pdf/2205.03116">pdf</a>, <a href="https://arxiv.org/format/2205.03116">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41746-022-00719-1">10.1038/s41746-022-00719-1</a></span> </div> </div> </div> <p class="title is-5 mathjax"> Longitudinal cardio-respiratory fitness prediction through wearables in free-living environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Perez-Pozuelo%2C+I">Ignacio Perez-Pozuelo</a>, <a href="/search/cs?searchtype=author&query=Gonzales%2C+T+I">Tomas I. Gonzales</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yu Wu</a>, <a href="/search/cs?searchtype=author&query=Brage%2C+S">Soren Brage</a>, <a href="/search/cs?searchtype=author&query=Wareham%2C+N">Nicholas Wareham</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: Cardiorespiratory fitness is an established predictor of metabolic disease and mortality. Fitness is directly measured as maximal oxygen consumption (VO$_{2}max$), or indirectly assessed using heart rate responses to standard exercise tests. However, such testing is costly and burdensome because it requires specialized equipment such as treadmills and oxygen masks, limiting its utility. Modern wearables capture dynamic real-world data which could improve fitness prediction. In this work, we design algorithms and models that convert raw wearable sensor data into cardiorespiratory fitness estimates. We validate these estimates' ability to capture fitness profiles in free-living conditions using the Fenland Study (N=11,059), along with its longitudinal cohort (N=2,675), and a third, external cohort from the UK Biobank Validation Study (N=181) who underwent maximal VO$_{2}max$ testing, the gold-standard measurement of fitness. Our results show that the combination of wearables and other biomarkers as inputs to neural networks yields a strong correlation to ground truth in a holdout sample (r = 0.82, 95% CI 0.80-0.83), outperforming other approaches and models, and detects fitness change over time (e.g., after 7 years). We also show how the model's latent space can be used for fitness-aware patient subtyping, paving the way to scalable interventions and personalized trial recruitment. These results demonstrate the value of wearables for fitness estimation that today can be measured only with laboratory tests. </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in Nature Digital Medicine, 16 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.08981">arXiv:2202.08981</a> <span> [<a href="https://arxiv.org/pdf/2202.08981">pdf</a>, <a href="https://arxiv.org/format/2202.08981">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> A Summary of the ComParE COVID-19 Challenges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Coppock%2C+H">Harry Coppock</a>, <a href="/search/cs?searchtype=author&query=Akman%2C+A">Alican Akman</a>, <a href="/search/cs?searchtype=author&query=Bergler%2C+C">Christian Bergler</a>, <a href="/search/cs?searchtype=author&query=Gerczuk%2C+M">Maurice Gerczuk</a>, <a href="/search/cs?searchtype=author&query=Brown%2C+C">Chloë Brown</a>, <a href="/search/cs?searchtype=author&query=Chauhan%2C+J">Jagmohan Chauhan</a>, <a href="/search/cs?searchtype=author&query=Grammenos%2C+A">Andreas Grammenos</a>, <a href="/search/cs?searchtype=author&query=Hasthanasombat%2C+A">Apinan Hasthanasombat</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Xia%2C+T">Tong Xia</a>, <a href="/search/cs?searchtype=author&query=Cicuta%2C+P">Pietro Cicuta</a>, <a href="/search/cs?searchtype=author&query=Han%2C+J">Jing Han</a>, <a href="/search/cs?searchtype=author&query=Amiriparian%2C+S">Shahin Amiriparian</a>, <a href="/search/cs?searchtype=author&query=Baird%2C+A">Alice Baird</a>, <a href="/search/cs?searchtype=author&query=Stappen%2C+L">Lukas Stappen</a>, <a href="/search/cs?searchtype=author&query=Ottl%2C+S">Sandra Ottl</a>, <a href="/search/cs?searchtype=author&query=Tzirakis%2C+P">Panagiotis Tzirakis</a>, <a href="/search/cs?searchtype=author&query=Batliner%2C+A">Anton Batliner</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a>, <a href="/search/cs?searchtype=author&query=Schuller%2C+B+W">Björn W. Schuller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: The COVID-19 pandemic has caused massive humanitarian and economic damage. Teams of scientists from a broad range of disciplines have searched for methods to help governments and communities combat the disease. One avenue from the machine learning field which has been explored is the prospect of a digital mass test which can detect COVID-19 from infected individuals' respiratory sounds.
We present a summary of the results from the INTERSPEECH 2021 Computational Paralinguistics Challenges: COVID-19 Cough (CCS) and COVID-19 Speech (CSS). </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 13 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.01232">arXiv:2201.01232</a> <span> [<a href="https://arxiv.org/pdf/2201.01232">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.2196/37004">10.2196/37004</a></span> </div> </div> </div> <p class="title is-5 mathjax"> Exploring Longitudinal Cough, Breath, and Voice Data for COVID-19 Progression Prediction via Sequential Deep Learning: Model Development and Validation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Dang%2C+T">Ting Dang</a>, <a href="/search/cs?searchtype=author&query=Han%2C+J">Jing Han</a>, <a href="/search/cs?searchtype=author&query=Xia%2C+T">Tong Xia</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Bondareva%2C+E">Erika Bondareva</a>, <a href="/search/cs?searchtype=author&query=Siegele-Brown%2C+C">Chloë Siegele-Brown</a>, <a href="/search/cs?searchtype=author&query=Chauhan%2C+J">Jagmohan Chauhan</a>, <a href="/search/cs?searchtype=author&query=Grammenos%2C+A">Andreas Grammenos</a>, <a href="/search/cs?searchtype=author&query=Hasthanasombat%2C+A">Apinan Hasthanasombat</a>, <a href="/search/cs?searchtype=author&query=Floto%2C+A">Andres Floto</a>, <a
href="/search/cs?searchtype=author&query=Cicuta%2C+P">Pietro Cicuta</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.01232v2-abstract-short" style="display: inline;"> Recent work has shown the potential of using audio data (eg, cough, breathing, and voice) in the screening for COVID-19. However, these approaches only focus on one-off detection and detect the infection given the current audio sample, but do not monitor disease progression in COVID-19. Limited exploration has been put forward to continuously monitor COVID-19 progression, especially recovery, thro… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.01232v2-abstract-full').style.display = 'inline'; document.getElementById('2201.01232v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.01232v2-abstract-full" style="display: none;"> Recent work has shown the potential of using audio data (eg, cough, breathing, and voice) in the screening for COVID-19. However, these approaches only focus on one-off detection and detect the infection given the current audio sample, but do not monitor disease progression in COVID-19. Limited exploration has been put forward to continuously monitor COVID-19 progression, especially recovery, through longitudinal audio data. Tracking disease progression characteristics could lead to more timely treatment. The primary objective of this study is to explore the potential of longitudinal audio samples over time for COVID-19 progression prediction and, especially, recovery trend prediction using sequential deep learning techniques. Crowdsourced respiratory audio data, including breathing, cough, and voice samples, from 212 individuals over 5-385 days were analyzed. We developed a deep learning-enabled tracking tool using gated recurrent units (GRUs) to detect COVID-19 progression by exploring the audio dynamics of the individuals' historical audio biomarkers. The investigation comprised 2 parts: (1) COVID-19 detection in terms of positive and negative (healthy) tests, and (2) longitudinal disease progression prediction over time in terms of probability of positive tests. The strong performance for COVID-19 detection, yielding an AUROC of 0.79, a sensitivity of 0.75, and a specificity of 0.71 supported the effectiveness of the approach compared to methods that do not leverage longitudinal dynamics. We further examined the predicted disease progression trajectory, displaying high consistency with test results with a correlation of 0.75 in the test cohort and 0.86 in a subset of the test cohort who reported recovery. Our findings suggest that monitoring COVID-19 evolution via longitudinal audio data has potential in the tracking of individuals' disease progression and recovery. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.07089">arXiv:2111.07089</a> <span> [<a href="https://arxiv.org/pdf/2111.07089">pdf</a>, <a href="https://arxiv.org/format/2111.07089">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Evaluating Contrastive Learning on Wearable Timeseries for Downstream Clinical Outcomes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shah%2C+K">Kevalee Shah</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Tang%2C+C+I">Chi Ian Tang</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: Vast quantities of person-generated health data (wearables) are collected, but the process of annotating them to feed to machine learning models is impractical. This paper discusses ways in which self-supervised approaches that use contrastive losses, such as SimCLR and BYOL, previously applied to the vision domain, can be applied to high-dimensional health signals for downstream classification tasks of various diseases spanning sleep, heart, and metabolic conditions. To this end, we adapt the data augmentation step and the overall architecture to suit the temporal nature of the data (wearable traces) and evaluate on 5 downstream tasks by comparing against other state-of-the-art methods, including supervised learning and an adversarial unsupervised representation learning method. We show that SimCLR outperforms the adversarial method and a fully-supervised method in the majority of the downstream evaluation tasks, and that all self-supervised methods outperform the fully-supervised methods. This work provides a comprehensive benchmark for contrastive methods applied to the wearable time-series domain, showing the promise of task-agnostic representations for downstream clinical outcomes. </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Machine Learning for Health (ML4H) - Extended Abstract</span> </p> </li>
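<p class="is-size-7">A small NumPy sketch of the kind of time-series augmentations used to generate the two "views" a SimCLR-style contrastive loss needs, adapted to wearable traces as the abstract above describes. The jitter and scaling choices are common defaults in this literature, assumed here rather than taken from the paper.</p> <pre><code class="language-python">
import numpy as np

rng = np.random.default_rng(42)

def jitter(x, sigma=0.05):
    return x + rng.normal(scale=sigma, size=x.shape)  # additive sensor noise

def scale(x, sigma=0.1):
    return x * rng.normal(loc=1.0, scale=sigma)       # global amplitude change

def two_views(x):
    # Two stochastic augmentations of the same trace form a positive pair
    # for the contrastive loss; different traces act as negatives.
    return jitter(scale(x)), jitter(scale(x))

trace = np.sin(np.linspace(0, 8 * np.pi, 500))        # stand-in heart-rate trace
view_a, view_b = two_views(trace)
</code></pre>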
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.15523">arXiv:2106.15523</a> <span> [<a href="https://arxiv.org/pdf/2106.15523">pdf</a>, <a href="https://arxiv.org/format/2106.15523">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Sounds of COVID-19: exploring realistic performance of audio-based digital testing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Han%2C+J">Jing Han</a>, <a href="/search/cs?searchtype=author&query=Xia%2C+T">Tong Xia</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Bondareva%2C+E">Erika Bondareva</a>, <a href="/search/cs?searchtype=author&query=Brown%2C+C">Chloë Brown</a>, <a href="/search/cs?searchtype=author&query=Chauhan%2C+J">Jagmohan Chauhan</a>, <a href="/search/cs?searchtype=author&query=Dang%2C+T">Ting Dang</a>, <a href="/search/cs?searchtype=author&query=Grammenos%2C+A">Andreas Grammenos</a>, <a href="/search/cs?searchtype=author&query=Hasthanasombat%2C+A">Apinan Hasthanasombat</a>, <a href="/search/cs?searchtype=author&query=Floto%2C+A">Andres Floto</a>, <a href="/search/cs?searchtype=author&query=Cicuta%2C+P">Pietro Cicuta</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: Researchers have been battling with the question of how we can identify Coronavirus disease (COVID-19) cases efficiently, affordably and at scale.
Recent work has shown how audio-based approaches, which collect respiratory audio data (cough, breathing and voice), can be used for testing; however, there is a lack of exploration of how biases and methodological decisions impact these tools' performance in practice. In this paper, we explore the realistic performance of audio-based digital testing of COVID-19. To investigate this, we collected a large crowdsourced respiratory audio dataset through a mobile app, alongside recent COVID-19 test results and symptoms, intended as a ground truth. Within the collected dataset, we selected 5,240 samples from 2,478 participants and split them into different participant-independent sets for model development and validation. Among these, we controlled for potential confounding factors (such as demographics and language). The unbiased model takes features extracted from breathing, coughs, and voice signals as predictors and yields an AUC-ROC of 0.71 (95\% CI: 0.65$-$0.77). We further explore different unbalanced distributions to show how biases and participant splits affect performance. Finally, we discuss how the realistic model presented could be integrated in clinical practice to realize continuous, ubiquitous, sustainable and affordable testing at population scale. </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> </li>
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.10970">arXiv:2106.10970</a> <span> [<a href="https://arxiv.org/pdf/2106.10970">pdf</a>, <a href="https://arxiv.org/format/2106.10970">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3447526.3472061">10.1145/3447526.3472061</a></span> </div> </div> </div> <p class="title is-5 mathjax"> Anticipatory Detection of Compulsive Body-focused Repetitive Behaviors with Wearables </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Searle%2C+B+L">Benjamin Lucas Searle</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Constantinides%2C+M">Marios Constantinides</a>, <a href="/search/cs?searchtype=author&query=Quercia%2C+D">Daniele Quercia</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: Body-focused repetitive behaviors (BFRBs), like face-touching or skin-picking, are hand-driven behaviors which can damage one's appearance, if not identified early and treated. Technology for automatic detection is still under-explored, with the few previous works limited to wearables with single modalities (e.g., motion). Here, we propose a multi-sensory approach combining motion, orientation, and heart rate sensors to detect BFRBs. We conducted a feasibility study in which participants (N=10) were exposed to BFRBs-inducing tasks, and analyzed 380 mins of signals under an extensive evaluation of sensing modalities, cross-validation methods, and observation windows. Our models achieved an AUC > 0.90 in distinguishing BFRBs, which were more evident in observation windows 5 mins prior to the behavior as opposed to 1-min ones. In a follow-up qualitative survey, we found that not only the timing of detection matters but also that models need to be context-aware, when designing just-in-time interventions to prevent BFRBs. </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to ACM MobileHCI 2021 (20 pages, dataset/code: https://github.com/Bhorda/BFRBAnticipationDataset)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.13468">arXiv:2102.13468</a> <span> [<a href="https://arxiv.org/pdf/2102.13468">pdf</a>, <a href="https://arxiv.org/format/2102.13468">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> The INTERSPEECH 2021 Computational Paralinguistics Challenge: COVID-19 Cough, COVID-19 Speech, Escalation & Primates </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Schuller%2C+B+W">Björn W. Schuller</a>, <a href="/search/cs?searchtype=author&query=Batliner%2C+A">Anton Batliner</a>, <a href="/search/cs?searchtype=author&query=Bergler%2C+C">Christian Bergler</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a>, <a href="/search/cs?searchtype=author&query=Han%2C+J">Jing Han</a>, <a href="/search/cs?searchtype=author&query=Lefter%2C+I">Iulia Lefter</a>, <a href="/search/cs?searchtype=author&query=Kaya%2C+H">Heysem Kaya</a>, <a href="/search/cs?searchtype=author&query=Amiriparian%2C+S">Shahin Amiriparian</a>, <a href="/search/cs?searchtype=author&query=Baird%2C+A">Alice Baird</a>, <a href="/search/cs?searchtype=author&query=Stappen%2C+L">Lukas Stappen</a>, <a href="/search/cs?searchtype=author&query=Ottl%2C+S">Sandra Ottl</a>, <a href="/search/cs?searchtype=author&query=Gerczuk%2C+M">Maurice Gerczuk</a>, <a href="/search/cs?searchtype=author&query=Tzirakis%2C+P">Panagiotis Tzirakis</a>, <a href="/search/cs?searchtype=author&query=Brown%2C+C">Chloë Brown</a>, <a href="/search/cs?searchtype=author&query=Chauhan%2C+J">Jagmohan Chauhan</a>, <a href="/search/cs?searchtype=author&query=Grammenos%2C+A">Andreas Grammenos</a>, <a href="/search/cs?searchtype=author&query=Hasthanasombat%2C+A">Apinan Hasthanasombat</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Xia%2C+T">Tong Xia</a>, <a href="/search/cs?searchtype=author&query=Cicuta%2C+P">Pietro Cicuta</a>, <a href="/search/cs?searchtype=author&query=Rothkrantz%2C+L+J+M">Leon J. M.
Rothkrantz, Joeri Zwerts, Jelle Treep, Casper Kaandorp

Abstract: The INTERSPEECH 2021 Computational Paralinguistics Challenge addresses four different problems for the first time in a research competition under well-defined conditions: in the COVID-19 Cough and COVID-19 Speech Sub-Challenges, a binary classification of COVID-19 infection has to be made based on coughing sounds and speech; in the Escalation Sub-Challenge, a three-way assessment of the level of escalation in a dialogue is featured; and in the Primates Sub-Challenge, four species versus background need to be classified. We describe the Sub-Challenges, baseline feature extraction, and classifiers based on the 'usual' ComParE and BoAW features, as well as deep unsupervised representation learning using the AuDeep toolkit, deep feature extraction from pre-trained CNNs using the Deep Spectrum toolkit, deep end-to-end sequential modelling, and a partial linguistic analysis.

Submitted: 24 February, 2021; originally announced February 2021.
Comments: 5 pages
MSC Class: 68; ACM Class: I.2.7; I.5.0; J.3

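Challenge baselines of this kind typically feed high-dimensional acoustic functionals to a linear SVM and report unweighted average recall (UAR). A hedged sketch, with synthetic features standing in for the ComParE functionals (6,373 dimensions in recent editions); the regularization constant and split are illustrative:

    import numpy as np
    from sklearn.svm import LinearSVC
    from sklearn.metrics import recall_score
    from sklearn.model_selection import train_test_split

    rng = np.random.default_rng(0)
    X = rng.normal(size=(300, 6373))   # stand-in for ComParE acoustic functionals
    y = rng.integers(0, 2, size=300)   # e.g., COVID-positive vs. negative cough

    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
    clf = LinearSVC(C=1e-4, max_iter=10000).fit(X_tr, y_tr)
    # unweighted average recall (UAR) = macro-averaged recall
    print("UAR:", recall_score(y_te, clf.predict(X_te), average="macro"))
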
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.7; I.5.0; J.3 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.06073">arXiv:2102.06073</a> <span> [<a href="https://arxiv.org/pdf/2102.06073">pdf</a>, <a href="https://arxiv.org/format/2102.06073">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3448112">10.1145/3448112 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> SelfHAR: Improving Human Activity Recognition through Self-training with Unlabeled Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tang%2C+C+I">Chi Ian Tang</a>, <a href="/search/cs?searchtype=author&query=Perez-Pozuelo%2C+I">Ignacio Perez-Pozuelo</a>, <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Brage%2C+S">Soren Brage</a>, <a href="/search/cs?searchtype=author&query=Wareham%2C+N">Nick Wareham</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.06073v1-abstract-short" style="display: inline;"> Machine learning and deep learning have shown great promise in mobile sensing applications, including Human Activity Recognition. However, the performance of such models in real-world settings largely depends on the availability of large datasets that captures diverse behaviors. Recently, studies in computer vision and natural language processing have shown that leveraging massive amounts of unlab… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.06073v1-abstract-full').style.display = 'inline'; document.getElementById('2102.06073v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.06073v1-abstract-full" style="display: none;"> Machine learning and deep learning have shown great promise in mobile sensing applications, including Human Activity Recognition. However, the performance of such models in real-world settings largely depends on the availability of large datasets that captures diverse behaviors. Recently, studies in computer vision and natural language processing have shown that leveraging massive amounts of unlabeled data enables performance on par with state-of-the-art supervised models. In this work, we present SelfHAR, a semi-supervised model that effectively learns to leverage unlabeled mobile sensing datasets to complement small labeled datasets. 
Our approach combines teacher-student self-training, which distills the knowledge of unlabeled and labeled datasets while allowing for data augmentation, with multi-task self-supervision, which learns robust signal-level representations by predicting distorted versions of the input. We evaluated SelfHAR on various HAR datasets and showed state-of-the-art performance over supervised and previous semi-supervised approaches, with up to a 12% increase in F1 score using the same number of model parameters at inference. Furthermore, SelfHAR is data-efficient, reaching similar performance with up to 10 times less labeled data than supervised approaches. Our work not only achieves state-of-the-art performance on a diverse set of HAR datasets, but also sheds light on how pre-training tasks may affect downstream performance.

Submitted: 11 February, 2021; originally announced February 2021.
Comments: Accepted for publication in Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT) 2021

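The teacher-student self-training step can be illustrated compactly. A toy sketch on synthetic features: the classifiers, feature dimensions, and confidence threshold are placeholders, and the paper's data augmentation and signal-transformation self-supervision task are only summarized in a comment.

    import numpy as np
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(0)
    X_lab = rng.normal(size=(100, 64))       # small labeled HAR dataset
    y_lab = rng.integers(0, 6, size=100)     # six hypothetical activity classes
    X_unlab = rng.normal(size=(1000, 64))    # large unlabeled sensing dataset

    # 1) train a teacher on the labeled data
    teacher = LogisticRegression(max_iter=1000).fit(X_lab, y_lab)

    # 2) pseudo-label the unlabeled windows, keeping only confident predictions
    proba = teacher.predict_proba(X_unlab)
    keep = proba.max(axis=1) > 0.9           # hypothetical confidence threshold
    X_pl, y_pl = X_unlab[keep], proba[keep].argmax(axis=1)

    # 3) train the student on labeled + pseudo-labeled data (SelfHAR additionally
    #    augments these windows and adds a transformation-discrimination task)
    student = LogisticRegression(max_iter=1000).fit(
        np.vstack([X_lab, X_pl]), np.concatenate([y_lab, y_pl]))
    print(student.score(X_lab, y_lab))
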
id="2102.05225v1-abstract-short" style="display: inline;"> The development of fast and accurate screening tools, which could facilitate testing and prevent more costly clinical tests, is key to the current pandemic of COVID-19. In this context, some initial work shows promise in detecting diagnostic signals of COVID-19 from audio sounds. In this paper, we propose a voice-based framework to automatically detect individuals who have tested positive for COVI… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05225v1-abstract-full').style.display = 'inline'; document.getElementById('2102.05225v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.05225v1-abstract-full" style="display: none;"> The development of fast and accurate screening tools, which could facilitate testing and prevent more costly clinical tests, is key to the current pandemic of COVID-19. In this context, some initial work shows promise in detecting diagnostic signals of COVID-19 from audio sounds. In this paper, we propose a voice-based framework to automatically detect individuals who have tested positive for COVID-19. We evaluate the performance of the proposed framework on a subset of data crowdsourced from our app, containing 828 samples from 343 participants. By combining voice signals and reported symptoms, an AUC of $0.79$ has been attained, with a sensitivity of $0.68$ and a specificity of $0.82$. We hope that this study opens the door to rapid, low-cost, and convenient pre-screening tools to automatically detect the disease. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05225v1-abstract-full').style.display = 'none'; document.getElementById('2102.05225v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 3 figures, 2 tables, Accepted for publication at ICASSP 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.12121">arXiv:2011.12121</a> <span> [<a href="https://arxiv.org/pdf/2011.12121">pdf</a>, <a href="https://arxiv.org/format/2011.12121">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3450439.3451863">10.1145/3450439.3451863 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Self-supervised transfer learning of physiological representations from free-living wearable data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Perez-Pozuelo%2C+I">Ignacio Perez-Pozuelo</a>, <a href="/search/cs?searchtype=author&query=Brage%2C+S">Soren Brage</a>, <a href="/search/cs?searchtype=author&query=Wareham%2C+N+J">Nicholas J. Wareham</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.12121v1-abstract-short" style="display: inline;"> Wearable devices such as smartwatches are becoming increasingly popular tools for objectively monitoring physical activity in free-living conditions. To date, research has primarily focused on the purely supervised task of human activity recognition, demonstrating limited success in inferring high-level health outcomes from low-level signals. Here, we present a novel self-supervised representation… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.12121v1-abstract-full').style.display = 'inline'; document.getElementById('2011.12121v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.12121v1-abstract-full" style="display: none;"> Wearable devices such as smartwatches are becoming increasingly popular tools for objectively monitoring physical activity in free-living conditions. To date, research has primarily focused on the purely supervised task of human activity recognition, demonstrating limited success in inferring high-level health outcomes from low-level signals. Here, we present a novel self-supervised representation learning method using activity and heart rate (HR) signals without semantic labels. With a deep neural network, we set HR responses as the supervisory signal for the activity data, leveraging their underlying physiological relationship. 
In addition, we propose a custom quantile loss function that accounts for the long-tailed HR distribution present in the general population. We evaluate our model on the largest free-living combined-sensing dataset (comprising >280k hours of wrist accelerometer and wearable ECG data). Our contributions are two-fold: i) the pre-training task creates a model that can accurately forecast HR based only on cheap activity sensors, and ii) we leverage the information captured through this task by proposing a simple method to aggregate the learnt latent representations (embeddings) from the window level to the user level. Notably, we show that the embeddings generalize to various downstream tasks through transfer learning with linear classifiers, capturing physiologically meaningful, personalized information. For instance, they can be used to predict variables associated with individuals' health, fitness, and demographic characteristics, outperforming unsupervised autoencoders and common biomarkers. Overall, we propose the first multimodal self-supervised method for behavioral and physiological data, with implications for large-scale health and lifestyle monitoring.

Submitted: 18 November, 2020; originally announced November 2020.
Comments: 9 pages, 3 figures (long version of extended abstract arXiv:2011.04601)

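The abstract does not spell out the loss, but a standard quantile (pinball) loss is one way to account for a long-tailed HR distribution: with q > 0.5, under-predicting the rare high heart rates costs more than over-predicting the common low ones. A sketch, with a hypothetical quantile choice:

    import numpy as np

    def quantile_loss(y_true, y_pred, q=0.9):
        """Pinball loss at quantile q; q > 0.5 penalizes under-prediction more."""
        e = y_true - y_pred
        return np.mean(np.maximum(q * e, (q - 1) * e))

    hr_true = np.array([62.0, 71.0, 88.0, 143.0])  # bpm, with a heavy right tail
    hr_pred = np.array([65.0, 70.0, 80.0, 110.0])
    print(quantile_loss(hr_true, hr_pred, q=0.9))
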
arXiv:2011.11542 (https://arxiv.org/abs/2011.11542) [pdf, other]
Subjects: cs.LG (Machine Learning); eess.SP (Signal Processing)

Exploring Contrastive Learning in Human Activity Recognition for Healthcare

Authors: Chi Ian Tang, Ignacio Perez-Pozuelo, Dimitris Spathis, Cecilia Mascolo

Abstract: Human Activity Recognition (HAR) constitutes one of the most important tasks for wearable and mobile sensing, given its implications for human well-being and health monitoring. Motivated by the limitations of labeled datasets in HAR, particularly when employed in healthcare-related applications, this work explores the adoption and adaptation of SimCLR, a contrastive learning technique for visual representations, to HAR. The contrastive learning objective makes the representations of corresponding views more similar, and those of non-corresponding views more different. After an extensive evaluation exploring 64 combinations of different signal transformations for augmenting the data, we observed significant performance differences owing to the order and the function of the transformations. In particular, preliminary results indicated an improvement over supervised and unsupervised learning methods when using fine-tuning and random rotation for augmentation; however, future work should explore under which conditions SimCLR is beneficial for HAR systems and other healthcare-related applications.

Submitted: 11 February, 2021; v1 submitted 23 November, 2020; originally announced November 2020.
Comments: Presented at the Machine Learning for Mobile Health Workshop at NeurIPS 2020, Vancouver, Canada

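SimCLR's two ingredients here are a stochastic signal augmentation (random rotation worked well above) and the NT-Xent contrastive objective, which pulls the two augmented views of each window together. A self-contained PyTorch sketch; the encoder, window size, and temperature are placeholders rather than the paper's setup:

    import math
    import torch
    import torch.nn.functional as F

    def nt_xent(z1, z2, tau=0.5):
        # NT-Xent: each embedding's positive is its counterpart in the other half
        z = F.normalize(torch.cat([z1, z2]), dim=1)
        sim = z @ z.t() / tau
        n = z1.shape[0]
        sim = sim.masked_fill(torch.eye(2 * n, dtype=torch.bool), float("-inf"))
        targets = torch.cat([torch.arange(n, 2 * n), torch.arange(0, n)])
        return F.cross_entropy(sim, targets)

    def random_rotation(x):
        # illustrative augmentation: rotate tri-axial windows about the z-axis
        theta = 2 * math.pi * torch.rand(1).item()
        c, s = math.cos(theta), math.sin(theta)
        R = torch.tensor([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
        return x @ R.t()

    x = torch.randn(32, 100, 3)  # batch of accelerometer windows (N, time, xyz)
    encoder = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(300, 64))
    loss = nt_xent(encoder(random_rotation(x)), encoder(random_rotation(x)))
    print(loss.item())
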
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented at Machine Learning for Mobile Health Workshop at NeurIPS 2020, Vancouver, Canada</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.04601">arXiv:2011.04601</a> <span> [<a href="https://arxiv.org/pdf/2011.04601">pdf</a>, <a href="https://arxiv.org/format/2011.04601">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Learning Generalizable Physiological Representations from Large-scale Wearable Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Perez-Pozuelo%2C+I">Ignacio Perez-Pozuelo</a>, <a href="/search/cs?searchtype=author&query=Brage%2C+S">Soren Brage</a>, <a href="/search/cs?searchtype=author&query=Wareham%2C+N+J">Nicholas J. Wareham</a>, <a href="/search/cs?searchtype=author&query=Mascolo%2C+C">Cecilia Mascolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.04601v1-abstract-short" style="display: inline;"> To date, research on sensor-equipped mobile devices has primarily focused on the purely supervised task of human activity recognition (walking, running, etc), demonstrating limited success in inferring high-level health outcomes from low-level signals, such as acceleration. Here, we present a novel self-supervised representation learning method using activity and heart rate (HR) signals without se… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.04601v1-abstract-full').style.display = 'inline'; document.getElementById('2011.04601v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.04601v1-abstract-full" style="display: none;"> To date, research on sensor-equipped mobile devices has primarily focused on the purely supervised task of human activity recognition (walking, running, etc), demonstrating limited success in inferring high-level health outcomes from low-level signals, such as acceleration. Here, we present a novel self-supervised representation learning method using activity and heart rate (HR) signals without semantic labels. With a deep neural network, we set HR responses as the supervisory signal for the activity data, leveraging their underlying physiological relationship. We evaluate our model in the largest free-living combined-sensing dataset (comprising more than 280,000 hours of wrist accelerometer & wearable ECG data) and show that the resulting embeddings can generalize in various downstream tasks through transfer learning with linear classifiers, capturing physiologically meaningful, personalized information. 
For instance, they can be used to predict (with AUC above 70%) variables associated with individuals' health, fitness, and demographic characteristics, outperforming unsupervised autoencoders and common biomarkers. Overall, we propose the first multimodal self-supervised method for behavioral and physiological data, with implications for large-scale health and lifestyle monitoring.

Submitted: 9 November, 2020; originally announced November 2020.
Comments: Accepted to the Machine Learning for Mobile Health workshop at NeurIPS 2020

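The transfer-learning evaluation described here amounts to a linear probe over user-level embeddings. A toy sketch, assuming window-level embeddings are aggregated by mean pooling (one simple choice; the paper's exact aggregation is not reproduced here) before fitting a linear classifier:

    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import cross_val_score

    rng = np.random.default_rng(0)
    n_users, wins_per_user, dim = 50, 20, 128
    window_emb = rng.normal(size=(n_users, wins_per_user, dim))  # pretrained embeddings
    user_emb = window_emb.mean(axis=1)     # aggregate window level -> user level
    y = rng.integers(0, 2, size=n_users)   # e.g., a binarized fitness variable

    probe = LogisticRegression(max_iter=1000)
    print("AUC:", cross_val_score(probe, user_emb, y, cv=5, scoring="roc_auc").mean())
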
arXiv:2006.05919 (https://arxiv.org/abs/2006.05919) [pdf, other]
Subjects: cs.SD (Sound); cs.LG (Machine Learning); eess.AS (Audio and Speech Processing)
DOI: 10.1145/3394486.3412865

Exploring Automatic Diagnosis of COVID-19 from Crowdsourced Respiratory Sound Data

Authors: Chloë Brown, Jagmohan Chauhan, Andreas Grammenos, Jing Han, Apinan Hasthanasombat, Dimitris Spathis, Tong Xia, Pietro Cicuta, Cecilia Mascolo

Abstract: Audio signals generated by the human body (e.g., sighs, breathing, heart, digestion, and vibration sounds) have routinely been used by clinicians as indicators to diagnose disease or assess disease progression. Until recently, such signals were usually collected through manual auscultation at scheduled visits. Research has now started to use digital technology to gather bodily sounds (e.g., from digital stethoscopes) for cardiovascular or respiratory examination, which could then be used for automatic analysis. Some initial work shows promise in detecting diagnostic signals of COVID-19 from voice and coughs. In this paper, we describe our analysis of a large-scale crowdsourced dataset of respiratory sounds collected to aid the diagnosis of COVID-19. We use coughs and breathing to understand how discernible COVID-19 sounds are from those of asthma patients or healthy controls. Our results show that even a simple binary machine learning classifier can correctly distinguish healthy and COVID-19 sounds. We also show how we distinguish a user who tested positive for COVID-19 and has a cough from a healthy user with a cough, and users who tested positive for COVID-19 and have a cough from users with asthma and a cough. Our models achieve an AUC above 80% across all tasks. These results are preliminary and only scratch the surface of the potential of this type of data and of audio-based machine learning. This work opens the door to further investigation of how automatically analysed respiratory patterns could be used as pre-screening signals to aid COVID-19 diagnosis.

Submitted: 18 January, 2021; v1 submitted 10 June, 2020; originally announced June 2020.
Comments: 9 pages, 6 figures, 2 tables, accepted for publication at KDD'20 (Health Day)

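Each of the binary tasks above reduces to a standard audio-classification pipeline: extract features per recording, then cross-validate a simple classifier. A hedged sketch with synthetic stand-ins for the extracted cough/breath features; the dimensionality, sample count, and model are illustrative, not the paper's:

    import numpy as np
    from sklearn.svm import SVC
    from sklearn.model_selection import cross_val_score

    rng = np.random.default_rng(0)
    X = rng.normal(size=(220, 477))    # stand-in features for cough/breath recordings
    y = rng.integers(0, 2, size=220)   # e.g., COVID-positive cough vs. healthy cough

    clf = SVC(kernel="rbf")            # RBF-kernel SVM baseline
    print("AUC:", cross_val_score(clf, X, y, cv=5, scoring="roc_auc").mean())
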
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 6 figures, 2 tables, Accepted for publication at KDD'20 (Health Day)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.05531">arXiv:1811.05531</a> <span> [<a href="https://arxiv.org/pdf/1811.05531">pdf</a>, <a href="https://arxiv.org/format/1811.05531">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.knosys.2018.11.015">10.1016/j.knosys.2018.11.015 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Interactive dimensionality reduction using similarity projections </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spathis%2C+D">Dimitris Spathis</a>, <a href="/search/cs?searchtype=author&query=Passalis%2C+N">Nikolaos Passalis</a>, <a href="/search/cs?searchtype=author&query=Tefas%2C+A">Anastasios Tefas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.05531v1-abstract-short" style="display: inline;"> Recent advances in machine learning allow us to analyze and describe the content of high-dimensional data like text, audio, images or other signals. In order to visualize that data in 2D or 3D, usually Dimensionality Reduction (DR) techniques are employed. Most of these techniques, e.g., PCA or t-SNE, produce static projections without taking into account corrections from humans or other data expl… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.05531v1-abstract-full').style.display = 'inline'; document.getElementById('1811.05531v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.05531v1-abstract-full" style="display: none;"> Recent advances in machine learning allow us to analyze and describe the content of high-dimensional data like text, audio, images or other signals. In order to visualize that data in 2D or 3D, usually Dimensionality Reduction (DR) techniques are employed. Most of these techniques, e.g., PCA or t-SNE, produce static projections without taking into account corrections from humans or other data exploration scenarios. In this work, we propose the interactive Similarity Projection (iSP), a novel interactive DR framework based on similarity embeddings, where we form a differentiable objective based on the user interactions and perform learning using gradient descent, with an end-to-end trainable architecture. Two interaction scenarios are evaluated. First, a common methodology in multidimensional projection is to project a subset of data, arrange them in classes or clusters, and project the rest unseen dataset based on that manipulation, in a kind of semi-supervised interpolation. We report results that outperform competitive baselines in a wide range of metrics and datasets. 
Second, we explore the scenario of manipulating some classes while enriching the optimization with high-dimensional neighbor information. Apart from improving classification precision and clustering on images and text documents, the newly emerging structure of the projection unveils semantic manifolds. For example, on the Head Pose dataset, by just dragging the faces looking far left to the left and those looking far right to the right, all faces are re-arranged along a continuum, even on the vertical axis (face up and down). This end-to-end framework can be used for fast, visual semi-supervised learning, manifold exploration, interactive domain adaptation of neural embeddings, and transfer learning.

Submitted: 13 November, 2018; originally announced November 2018.
Comments: Accepted at Knowledge-Based Systems

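The core mechanism, a differentiable objective built from user interactions and optimized with gradient descent, can be sketched directly. A toy PyTorch version that matches the projection's cosine similarities to the high-dimensional ones while pinning two user-dragged points; all shapes, weights, and the optimizer are illustrative, not the iSP implementation:

    import torch

    torch.manual_seed(0)
    X = torch.randn(30, 10)                          # high-dimensional data
    P = (0.1 * torch.randn(30, 2)).requires_grad_()  # learnable 2-D projection

    def cos_sim(a):
        a = torch.nn.functional.normalize(a, dim=1)
        return a @ a.t()

    S_hi = cos_sim(X)                     # target: high-dimensional similarities
    # user interaction: points 0 and 1 dragged to chosen screen positions
    idx = torch.tensor([0, 1])
    target = torch.tensor([[-1.0, 0.0], [1.0, 0.0]])

    opt = torch.optim.Adam([P], lr=0.05)
    for _ in range(500):
        opt.zero_grad()
        loss = ((cos_sim(P) - S_hi) ** 2).mean() + ((P[idx] - target) ** 2).mean()
        loss.backward()
        opt.step()
    print(P[:2].detach())                 # dragged points end up near their targets
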
arXiv:1612.06259 (https://arxiv.org/abs/1612.06259) [pdf, other]
Subjects: cs.CV (Computer Vision and Pattern Recognition)

Photo-Quality Evaluation based on Computational Aesthetics: Review of Feature Extraction Techniques

Authors: Dimitris Spathis

Abstract: Researchers try to model the aesthetic quality of photographs with low- and high-level features, drawing inspiration from art theory, psychology, and marketing. We attempt to describe every feature extraction measure employed in this process. The contribution of this literature review is a taxonomy of each feature by its implementation complexity, considering real-world applications and integration into mobile apps and digital cameras. We also discuss the machine learning results, along with some unexplored research areas as future work.

Submitted: 19 December, 2016; originally announced December 2016.
512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>