<!--
  Scrape artifacts preserved from the saved page ("CINXE.COM" banner and a
  duplicate of the page title). Wrapped in a comment so that no stray text
  precedes the doctype: bare text here would force browsers into quirks mode,
  whereas comments before the doctype are valid HTML.

  CINXE.COM
  Search | arXiv e-print repository
-->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 112 results for author: <span class="mathjax">King, A</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=King%2C+A">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="King, A"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=King%2C+A&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="King, A"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=King%2C+A&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=King%2C+A&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=King%2C+A&amp;start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=King%2C+A&amp;start=100" class="pagination-link " aria-label="Page 3">3 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11190">arXiv:2411.11190</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11190">pdf</a>, <a href="https://arxiv.org/format/2411.11190">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> 
</div> </div> <p class="title is-5 mathjax"> DeepSPV: An Interpretable Deep Learning Pipeline for 3D Spleen Volume Estimation from 2D Ultrasound Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+Z">Zhen Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Stojanovski%2C+D">David Stojanovski</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Lei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Jogeesvaran%2C+H">Haran Jogeesvaran</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&amp;query=Inusa%2C+B">Baba Inusa</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11190v1-abstract-short" style="display: inline;"> Splenomegaly, the enlargement of the spleen, is an important clinical indicator for various associated medical conditions, such as sickle cell disease (SCD). Spleen length measured from 2D ultrasound is the most widely used metric for characterising spleen size. However, it is still considered a surrogate measure, and spleen volume remains the gold standard for assessing spleen size. 
Accurate sple&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11190v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11190v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11190v1-abstract-full" style="display: none;"> Splenomegaly, the enlargement of the spleen, is an important clinical indicator for various associated medical conditions, such as sickle cell disease (SCD). Spleen length measured from 2D ultrasound is the most widely used metric for characterising spleen size. However, it is still considered a surrogate measure, and spleen volume remains the gold standard for assessing spleen size. Accurate spleen volume measurement typically requires 3D imaging modalities, such as computed tomography or magnetic resonance imaging, but these are not widely available, especially in the Global South which has a high prevalence of SCD. In this work, we introduce a deep learning pipeline, DeepSPV, for precise spleen volume estimation from single or dual 2D ultrasound images. The pipeline involves a segmentation network and a variational autoencoder for learning low-dimensional representations from the estimated segmentations. We investigate three approaches for spleen volume estimation and our best model achieves 86.62%/92.5% mean relative volume accuracy (MRVA) under single-view/dual-view settings, surpassing the performance of human experts. In addition, the pipeline can provide confidence intervals for the volume estimates as well as offering benefits in terms of interpretability, which further support clinicians in decision-making when identifying splenomegaly. We evaluate the full pipeline using a highly realistic synthetic dataset generated by a diffusion model, achieving an overall MRVA of 83.0% from a single 2D ultrasound image. 
Our proposed DeepSPV is the first work to use deep learning to estimate 3D spleen volume from 2D ultrasound images and can be seamlessly integrated into the current clinical workflow for spleen assessment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11190v1-abstract-full').style.display = 'none'; document.getElementById('2411.11190v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2308.08038</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.11754">arXiv:2408.11754</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.11754">pdf</a>, <a href="https://arxiv.org/format/2408.11754">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Improving the Scan-rescan Precision of AI-based CMR Biomarker Estimation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wickremasinghe%2C+D+H">Dewmini Hasara Wickremasinghe</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yiyang Xu</a>, <a 
href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&amp;query=Aljabar%2C+P">Paul Aljabar</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.11754v1-abstract-short" style="display: inline;"> Quantification of cardiac biomarkers from cine cardiovascular magnetic resonance (CMR) data using deep learning (DL) methods offers many advantages, such as increased accuracy and faster analysis. However, only a few studies have focused on the scan-rescan precision of the biomarker estimates, which is important for reproducibility and longitudinal analysis. Here, we propose a cardiac biomarker es&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11754v1-abstract-full').style.display = 'inline'; document.getElementById('2408.11754v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.11754v1-abstract-full" style="display: none;"> Quantification of cardiac biomarkers from cine cardiovascular magnetic resonance (CMR) data using deep learning (DL) methods offers many advantages, such as increased accuracy and faster analysis. However, only a few studies have focused on the scan-rescan precision of the biomarker estimates, which is important for reproducibility and longitudinal analysis. Here, we propose a cardiac biomarker estimation pipeline that not only focuses on achieving high segmentation accuracy but also on improving the scan-rescan precision of the computed biomarkers, namely left and right ventricular ejection fraction, and left ventricular myocardial mass. 
We evaluate two approaches to improve the apical-basal resolution of the segmentations used for estimating the biomarkers: one based on image interpolation and one based on segmentation interpolation. Using a database comprising scan-rescan cine CMR data acquired from 92 subjects, we compare the performance of these two methods against ground truth (GT) segmentations and DL segmentations obtained before interpolation (baseline). The results demonstrate that both the image-based and segmentation-based interpolation methods were able to narrow Bland-Altman scan-rescan confidence intervals for all biomarkers compared to the GT and baseline performances. Our findings highlight the importance of focusing not only on segmentation accuracy but also on the consistency of biomarkers across repeated scans, which is crucial for longitudinal analysis of cardiac function. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11754v1-abstract-full').style.display = 'none'; document.getElementById('2408.11754v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 3 figures, MICCAI STACOM 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.02462">arXiv:2408.02462</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.02462">pdf</a>, <a href="https://arxiv.org/format/2408.02462">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> An investigation into the causes of race bias in AI-based cine CMR segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lee%2C+T">Tiarna Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Anton%2C+E">Esther Puyol-Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Roujol%2C+S">Sebastien Roujol</a>, <a href="/search/cs?searchtype=author&amp;query=Barfoot%2C+T">Theodore Barfoot</a>, <a href="/search/cs?searchtype=author&amp;query=Ogbomo-Harmitt%2C+S">Shaheim Ogbomo-Harmitt</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+M">Miaojing Shi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. 
King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.02462v1-abstract-short" style="display: inline;"> Artificial intelligence (AI) methods are being used increasingly for the automated segmentation of cine cardiac magnetic resonance (CMR) imaging. However, these methods have been shown to be subject to race bias, i.e. they exhibit different levels of performance for different races depending on the (im)balance of the data used to train the AI model. In this paper we investigate the source of this&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.02462v1-abstract-full').style.display = 'inline'; document.getElementById('2408.02462v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.02462v1-abstract-full" style="display: none;"> Artificial intelligence (AI) methods are being used increasingly for the automated segmentation of cine cardiac magnetic resonance (CMR) imaging. However, these methods have been shown to be subject to race bias, i.e. they exhibit different levels of performance for different races depending on the (im)balance of the data used to train the AI model. In this paper we investigate the source of this bias, seeking to understand its root cause(s) so that it can be effectively mitigated. We perform a series of classification and segmentation experiments on short-axis cine CMR images acquired from Black and White subjects from the UK Biobank and apply AI interpretability methods to understand the results. 
In the classification experiments, we found that race can be predicted with high accuracy from the images alone, but less accurately from ground truth segmentations, suggesting that the distributional shift between races, which is often the cause of AI bias, is mostly image-based rather than segmentation-based. The interpretability methods showed that most attention in the classification models was focused on non-heart regions, such as subcutaneous fat. Cropping the images tightly around the heart reduced classification accuracy to around chance level. Similarly, race can be predicted from the latent representations of a biased segmentation model, suggesting that race information is encoded in the model. Cropping images tightly around the heart reduced but did not eliminate segmentation bias. We also investigate the influence of possible confounders on the bias observed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.02462v1-abstract-full').style.display = 'none'; document.getElementById('2408.02462v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.04557">arXiv:2406.04557</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.04557">pdf</a>, <a href="https://arxiv.org/format/2406.04557">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Countrywide natural experiment reveals impact of built environment on physical activity </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Althoff%2C+T">Tim Althoff</a>, <a href="/search/cs?searchtype=author&amp;query=Ivanovic%2C+B">Boris Ivanovic</a>, <a href="/search/cs?searchtype=author&amp;query=Hicks%2C+J+L">Jennifer L. Hicks</a>, <a href="/search/cs?searchtype=author&amp;query=Delp%2C+S+L">Scott L. Delp</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+C">Abby C. King</a>, <a href="/search/cs?searchtype=author&amp;query=Leskovec%2C+J">Jure Leskovec</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.04557v1-abstract-short" style="display: inline;"> While physical activity is critical to human health, most people do not meet recommended guidelines. More walkable built environments have the potential to increase activity across the population. 
However, previous studies on the built environment and physical activity have led to mixed findings, possibly due to methodological limitations such as small cohorts, few or single locations, over-relian&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.04557v1-abstract-full').style.display = 'inline'; document.getElementById('2406.04557v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.04557v1-abstract-full" style="display: none;"> While physical activity is critical to human health, most people do not meet recommended guidelines. More walkable built environments have the potential to increase activity across the population. However, previous studies on the built environment and physical activity have led to mixed findings, possibly due to methodological limitations such as small cohorts, few or single locations, over-reliance on self-reported measures, and cross-sectional designs. Here, we address these limitations by leveraging a large U.S. cohort of smartphone users (N=2,112,288) to evaluate within-person longitudinal behavior changes that occurred over 248,266 days of objectively-measured physical activity across 7,447 relocations among 1,609 U.S. cities. By analyzing the results of this natural experiment, which exposed individuals to differing built environments, we find that increases in walkability are associated with significant increases in physical activity after relocation (and vice versa). These changes hold across subpopulations of different genders, age, and body-mass index (BMI), and are sustained over three months after moving.The added activity observed after moving to a more walkable location is predominantly composed of moderate-to-vigorous physical activity (MVPA), which is linked to an array of associated health benefits across the life course. 
A simulation experiment demonstrates that substantial walkability improvements (i.e., bringing all US locations to the walkability level of Chicago or Philadelphia) may lead to 10.3% or 33 million more Americans meeting aerobic physical activity guidelines. Evidence against residential self-selection confounding is reported. Our findings provide robust evidence supporting the importance of the built environment in directly improving health-enhancing physical activity, in addition to offering potential guidance for public policy activities in this area. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.04557v1-abstract-full').style.display = 'none'; document.getElementById('2406.04557v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.06487">arXiv:2405.06487</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.06487">pdf</a>, <a href="https://arxiv.org/format/2405.06487">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Improving Deep Learning Model Calibration for Cardiac Applications using Deterministic Uncertainty Networks and Uncertainty-aware Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dawood%2C+T">Tareen Dawood</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.06487v1-abstract-short" style="display: inline;"> Improving calibration performance in deep learning (DL) classification models is important when planning the use of DL in a decision-support setting. In such a scenario, a confident wrong prediction could lead to a lack of trust and/or harm in a high-risk application. 
We evaluate the impact on accuracy and calibration of two types of approach that aim to improve DL classification model calibration&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.06487v1-abstract-full').style.display = 'inline'; document.getElementById('2405.06487v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.06487v1-abstract-full" style="display: none;"> Improving calibration performance in deep learning (DL) classification models is important when planning the use of DL in a decision-support setting. In such a scenario, a confident wrong prediction could lead to a lack of trust and/or harm in a high-risk application. We evaluate the impact on accuracy and calibration of two types of approach that aim to improve DL classification model calibration: deterministic uncertainty methods (DUM) and uncertainty-aware training. Specifically, we test the performance of three DUMs and two uncertainty-aware training approaches as well as their combinations. To evaluate their utility, we use two realistic clinical applications from the field of cardiac imaging: artefact detection from phase contrast cardiac magnetic resonance (CMR) and disease diagnosis from the public ACDC CMR dataset. Our results indicate that both DUMs and uncertainty-aware training can improve both accuracy and calibration in both of our applications, with DUMs generally offering the best improvements. We also investigate the combination of the two approaches, resulting in a novel deterministic uncertainty-aware training approach. This provides further improvements for some combinations of DUMs and uncertainty-aware training approaches. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.06487v1-abstract-full').style.display = 'none'; document.getElementById('2405.06487v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">currently under review for publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.05478">arXiv:2405.05478</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.05478">pdf</a>, <a href="https://arxiv.org/format/2405.05478">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Using Machine Translation to Augment Multilingual Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Adam King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.05478v1-abstract-short" style="display: inline;"> An all-too-present bottleneck for text classification model development is the need to annotate training data and this need is multiplied for multilingual classifiers. Fortunately, contemporary machine translation models are both easily accessible and have dependable translation quality, making it possible to translate labeled training data from one language into another. 
Here, we explore the effe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.05478v1-abstract-full').style.display = 'inline'; document.getElementById('2405.05478v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.05478v1-abstract-full" style="display: none;"> An all-too-present bottleneck for text classification model development is the need to annotate training data and this need is multiplied for multilingual classifiers. Fortunately, contemporary machine translation models are both easily accessible and have dependable translation quality, making it possible to translate labeled training data from one language into another. Here, we explore the effects of using machine translation to fine-tune a multilingual model for a classification task across multiple languages. We also investigate the benefits of using a novel technique, originally proposed in the field of image captioning, to account for potential negative effects of tuning models on translated data. We show that translated data are of sufficient quality to tune multilingual classifiers and that this novel loss technique is able to offer some improvement over models tuned without it. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.05478v1-abstract-full').style.display = 'none'; document.getElementById('2405.05478v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.07818">arXiv:2403.07818</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.07818">pdf</a>, <a href="https://arxiv.org/format/2403.07818">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Label Dropout: Improved Deep Learning Echocardiography Segmentation Using Multiple Datasets With Domain Shift and Partial Labelling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+I">Iman Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Reader%2C+A+J">Andrew J. Reader</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.07818v2-abstract-short" style="display: inline;"> Echocardiography (echo) is the first imaging modality used when assessing cardiac function. The measurement of functional biomarkers from echo relies upon the segmentation of cardiac structures and deep learning models have been proposed to automate the segmentation process. 
However, in order to translate these tools to widespread clinical use it is important that the segmentation models are robus&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07818v2-abstract-full').style.display = 'inline'; document.getElementById('2403.07818v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.07818v2-abstract-full" style="display: none;"> Echocardiography (echo) is the first imaging modality used when assessing cardiac function. The measurement of functional biomarkers from echo relies upon the segmentation of cardiac structures and deep learning models have been proposed to automate the segmentation process. However, in order to translate these tools to widespread clinical use it is important that the segmentation models are robust to a wide variety of images (e.g. acquired from different scanners, by operators with different levels of expertise etc.). To achieve this level of robustness it is necessary that the models are trained with multiple diverse datasets. A significant challenge faced when training with multiple diverse datasets is the variation in label presence, i.e. the combined data are often partially-labelled. Adaptations of the cross entropy loss function have been proposed to deal with partially labelled data. In this paper we show that training naively with such a loss function and multiple diverse datasets can lead to a form of shortcut learning, where the model associates label presence with domain characteristics, leading to a drop in performance. To address this problem, we propose a novel label dropout scheme to break the link between domain characteristics and the presence or absence of labels. We demonstrate that label dropout improves echo segmentation Dice score by 62% and 25% on two cardiac structures when training using multiple diverse partially labelled datasets. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07818v2-abstract-full').style.display = 'none'; document.getElementById('2403.07818v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 5 figures, ASMUS 2024, Held in Conjunction with MICCAI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.13437">arXiv:2402.13437</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.13437">pdf</a>, <a href="https://arxiv.org/format/2402.13437">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3613904.3641896">10.1145/3613904.3641896 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Sketching AI Concepts with Capabilities and Examples: AI Innovation in the Intensive Care Unit </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yildirim%2C+N">Nur Yildirim</a>, <a href="/search/cs?searchtype=author&amp;query=Zlotnikov%2C+S">Susanna Zlotnikov</a>, <a 
href="/search/cs?searchtype=author&amp;query=Sayar%2C+D">Deniz Sayar</a>, <a href="/search/cs?searchtype=author&amp;query=Kahn%2C+J+M">Jeremy M. Kahn</a>, <a href="/search/cs?searchtype=author&amp;query=Bukowski%2C+L+A">Leigh A. Bukowski</a>, <a href="/search/cs?searchtype=author&amp;query=Amin%2C+S+S">Sher Shah Amin</a>, <a href="/search/cs?searchtype=author&amp;query=Riman%2C+K+A">Kathryn A. Riman</a>, <a href="/search/cs?searchtype=author&amp;query=Davis%2C+B+S">Billie S. Davis</a>, <a href="/search/cs?searchtype=author&amp;query=Minturn%2C+J+S">John S. Minturn</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+J">Andrew J. King</a>, <a href="/search/cs?searchtype=author&amp;query=Ricketts%2C+D">Dan Ricketts</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+L">Lu Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Sivaraman%2C+V">Venkatesh Sivaraman</a>, <a href="/search/cs?searchtype=author&amp;query=Perer%2C+A">Adam Perer</a>, <a href="/search/cs?searchtype=author&amp;query=Preum%2C+S+M">Sarah M. Preum</a>, <a href="/search/cs?searchtype=author&amp;query=McCann%2C+J">James McCann</a>, <a href="/search/cs?searchtype=author&amp;query=Zimmerman%2C+J">John Zimmerman</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.13437v1-abstract-short" style="display: inline;"> Advances in artificial intelligence (AI) have enabled unprecedented capabilities, yet innovation teams struggle when envisioning AI concepts. Data science teams think of innovations users do not want, while domain experts think of innovations that cannot be built. A lack of effective ideation seems to be a breakdown point. 
How might multidisciplinary teams identify buildable and desirable use case&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.13437v1-abstract-full').style.display = 'inline'; document.getElementById('2402.13437v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.13437v1-abstract-full" style="display: none;"> Advances in artificial intelligence (AI) have enabled unprecedented capabilities, yet innovation teams struggle when envisioning AI concepts. Data science teams think of innovations users do not want, while domain experts think of innovations that cannot be built. A lack of effective ideation seems to be a breakdown point. How might multidisciplinary teams identify buildable and desirable use cases? This paper presents a first hand account of ideating AI concepts to improve critical care medicine. As a team of data scientists, clinicians, and HCI researchers, we conducted a series of design workshops to explore more effective approaches to AI concept ideation and problem formulation. We detail our process, the challenges we encountered, and practices and artifacts that proved effective. We discuss the research implications for improved collaboration and stakeholder engagement, and discuss the role HCI might play in reducing the high failure rate experienced in AI innovation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.13437v1-abstract-full').style.display = 'none'; document.getElementById('2402.13437v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">to appear at CHI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.11197">arXiv:2401.11197</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.11197">pdf</a>, <a href="https://arxiv.org/format/2401.11197">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Logic in Computer Science">cs.LO</span> </div> </div> <p class="title is-5 mathjax"> Timeout Asynchronous Session Types: Safe Asynchronous Mixed-Choice For Timed Interactions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pears%2C+J">Jonah Pears</a>, <a href="/search/cs?searchtype=author&amp;query=Bocchi%2C+L">Laura Bocchi</a>, <a href="/search/cs?searchtype=author&amp;query=Murgia%2C+M">Maurizio Murgia</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Andy King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.11197v2-abstract-short" style="display: inline;"> Mixed-choice has long been barred from models of asynchronous communication since it compromises the decidability of key properties of communicating finite-state machines. Session types inherit this restriction, which precludes them from fully modelling timeouts -- a core property of web and cloud services. 
To address this deficiency, we present (binary) Timeout Asynchronous Session Types (TOAST)&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.11197v2-abstract-full').style.display = 'inline'; document.getElementById('2401.11197v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.11197v2-abstract-full" style="display: none;"> Mixed-choice has long been barred from models of asynchronous communication since it compromises the decidability of key properties of communicating finite-state machines. Session types inherit this restriction, which precludes them from fully modelling timeouts -- a core property of web and cloud services. To address this deficiency, we present (binary) Timeout Asynchronous Session Types (TOAST) as an extension to (binary) asynchronous timed session types, that permits mixed-choice. TOAST deploys timing constraints to regulate the use of mixed-choice so as to preserve communication safety. We provide a new behavioural semantics for TOAST which guarantees progress in the presence of mixed-choice. Building upon TOAST, we provide a calculus featuring process timers which is capable of modelling timeouts using a $\mathtt{receive-after}$ pattern, much like Erlang, and capture the correspondence with TOAST specifications via a type system for which we prove subject reduction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.11197v2-abstract-full').style.display = 'none'; document.getElementById('2401.11197v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">65 pages, revised for LMCS</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.11390">arXiv:2311.11390</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.11390">pdf</a>, <a href="https://arxiv.org/format/2311.11390">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Addressing the speed-accuracy simulation trade-off for adaptive spiking neurons </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Taylor%2C+L">Luke Taylor</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+J">Andrew J King</a>, <a href="/search/cs?searchtype=author&amp;query=Harper%2C+N+S">Nicol S Harper</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.11390v1-abstract-short" style="display: inline;"> The adaptive leaky integrate-and-fire (ALIF) model is fundamental within computational neuroscience and has been instrumental in studying our brains $\textit{in silico}$. 
Due to the sequential nature of simulating these neural models, a commonly faced issue is the speed-accuracy trade-off: either accurately simulate a neuron using a small discretisation time-step (DT), which is slow, or more quick&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.11390v1-abstract-full').style.display = 'inline'; document.getElementById('2311.11390v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.11390v1-abstract-full" style="display: none;"> The adaptive leaky integrate-and-fire (ALIF) model is fundamental within computational neuroscience and has been instrumental in studying our brains $\textit{in silico}$. Due to the sequential nature of simulating these neural models, a commonly faced issue is the speed-accuracy trade-off: either accurately simulate a neuron using a small discretisation time-step (DT), which is slow, or more quickly simulate a neuron using a larger DT and incur a loss in simulation accuracy. Here we provide a solution to this dilemma, by algorithmically reinterpreting the ALIF model, reducing the sequential simulation complexity and permitting a more efficient parallelisation on GPUs. We computationally validate our implementation to obtain over a $50\times$ training speedup using small DTs on synthetic benchmarks. We also obtained a comparable performance to the standard ALIF implementation on different supervised classification tasks - yet in a fraction of the training time. Lastly, we showcase how our model makes it possible to quickly and accurately fit real electrophysiological recordings of cortical neurons, where very fine sub-millisecond DTs are crucial for capturing exact spike timing. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.11390v1-abstract-full').style.display = 'none'; document.getElementById('2311.11390v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 5 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Thirty-seventh Conference on Neural Information Processing Systems. 2023 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.07234">arXiv:2311.07234</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.07234">pdf</a>, <a href="https://arxiv.org/format/2311.07234">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.59275/j.melba.2023-b7bc">10.59275/j.melba.2023-b7bc <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Multi-task learning for joint weakly-supervised segmentation and aortic arch anomaly 
classification in fetal cardiac MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ramirez%2C+P">Paula Ramirez</a>, <a href="/search/cs?searchtype=author&amp;query=Uus%2C+A">Alena Uus</a>, <a href="/search/cs?searchtype=author&amp;query=van+Poppel%2C+M+P+M">Milou P. M. van Poppel</a>, <a href="/search/cs?searchtype=author&amp;query=Grigorescu%2C+I">Irina Grigorescu</a>, <a href="/search/cs?searchtype=author&amp;query=Steinweg%2C+J+K">Johannes K. Steinweg</a>, <a href="/search/cs?searchtype=author&amp;query=Lloyd%2C+D+F+A">David F. A. Lloyd</a>, <a href="/search/cs?searchtype=author&amp;query=Pushparajah%2C+K">Kuberan Pushparajah</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a>, <a href="/search/cs?searchtype=author&amp;query=Deprez%2C+M">Maria Deprez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.07234v1-abstract-short" style="display: inline;"> Congenital Heart Disease (CHD) is a group of cardiac malformations present already during fetal life, representing the prevailing category of birth defects globally. Our aim in this study is to aid 3D fetal vessel topology visualisation in aortic arch anomalies, a group which encompasses a range of conditions with significant anatomical heterogeneity. 
We present a multi-task framework for automate&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.07234v1-abstract-full').style.display = 'inline'; document.getElementById('2311.07234v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.07234v1-abstract-full" style="display: none;"> Congenital Heart Disease (CHD) is a group of cardiac malformations present already during fetal life, representing the prevailing category of birth defects globally. Our aim in this study is to aid 3D fetal vessel topology visualisation in aortic arch anomalies, a group which encompasses a range of conditions with significant anatomical heterogeneity. We present a multi-task framework for automated multi-class fetal vessel segmentation from 3D black blood T2w MRI and anomaly classification. Our training data consists of binary manual segmentation masks of the cardiac vessels&#39; region in individual subjects and fully-labelled anomaly-specific population atlases. Our framework combines deep learning label propagation using VoxelMorph with 3D Attention U-Net segmentation and DenseNet121 anomaly classification. We target 11 cardiac vessels and three distinct aortic arch anomalies, including double aortic arch, right aortic arch, and suspected coarctation of the aorta. We incorporate an anomaly classifier into our segmentation pipeline, delivering a multi-task framework with the primary motivation of correcting topological inaccuracies of the segmentation. The hypothesis is that the multi-task approach will encourage the segmenter network to learn anomaly-specific features. As a secondary motivation, an automated diagnosis tool may have the potential to enhance diagnostic confidence in a decision support setting. 
Our results showcase that our proposed training strategy significantly outperforms label propagation and a network trained exclusively on propagated labels. Our classifier outperforms a classifier trained exclusively on T2w volume images, with an average balanced accuracy of 0.99 (0.01) after joint training. Adding a classifier improves the anatomical and topological accuracy of all correctly classified double aortic arch subjects. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.07234v1-abstract-full').style.display = 'none'; document.getElementById('2311.07234v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at the Journal of Machine Learning for Biomedical Imaging (MELBA) https://melba-journal.org/2023:015</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine.Learning.for.Biomedical.Imaging. 
2 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.19304">arXiv:2310.19304</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.19304">pdf</a>, <a href="https://arxiv.org/format/2310.19304">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Privacy-Preserving Federated Learning over Vertically and Horizontally Partitioned Data for Financial Anomaly Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kadhe%2C+S+R">Swanand Ravindra Kadhe</a>, <a href="/search/cs?searchtype=author&amp;query=Ludwig%2C+H">Heiko Ludwig</a>, <a href="/search/cs?searchtype=author&amp;query=Baracaldo%2C+N">Nathalie Baracaldo</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Alan King</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yi Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Houck%2C+K">Keith Houck</a>, <a href="/search/cs?searchtype=author&amp;query=Rawat%2C+A">Ambrish Rawat</a>, <a href="/search/cs?searchtype=author&amp;query=Purcell%2C+M">Mark Purcell</a>, <a href="/search/cs?searchtype=author&amp;query=Holohan%2C+N">Naoise Holohan</a>, <a href="/search/cs?searchtype=author&amp;query=Takeuchi%2C+M">Mikio Takeuchi</a>, <a href="/search/cs?searchtype=author&amp;query=Kawahara%2C+R">Ryo Kawahara</a>, <a href="/search/cs?searchtype=author&amp;query=Drucker%2C+N">Nir Drucker</a>, <a href="/search/cs?searchtype=author&amp;query=Shaul%2C+H">Hayim Shaul</a>, <a href="/search/cs?searchtype=author&amp;query=Kushnir%2C+E">Eyal Kushnir</a>, <a href="/search/cs?searchtype=author&amp;query=Soceanu%2C+O">Omri Soceanu</a> 
</p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.19304v1-abstract-short" style="display: inline;"> The effective detection of evidence of financial anomalies requires collaboration among multiple entities who own a diverse set of data, such as a payment network system (PNS) and its partner banks. Trust among these financial institutions is limited by regulation and competition. Federated learning (FL) enables entities to collaboratively train a model when data is either vertically or horizontal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.19304v1-abstract-full').style.display = 'inline'; document.getElementById('2310.19304v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.19304v1-abstract-full" style="display: none;"> The effective detection of evidence of financial anomalies requires collaboration among multiple entities who own a diverse set of data, such as a payment network system (PNS) and its partner banks. Trust among these financial institutions is limited by regulation and competition. Federated learning (FL) enables entities to collaboratively train a model when data is either vertically or horizontally partitioned across the entities. However, in real-world financial anomaly detection scenarios, the data is partitioned both vertically and horizontally and hence it is not possible to use existing FL approaches in a plug-and-play manner. Our novel solution, PV4FAD, combines fully homomorphic encryption (HE), secure multi-party computation (SMPC), differential privacy (DP), and randomization techniques to balance privacy and accuracy during training and to prevent inference threats at model deployment time. 
Our solution provides input privacy through HE and SMPC, and output privacy against inference time attacks through DP. Specifically, we show that, in the honest-but-curious threat model, banks do not learn any sensitive features about PNS transactions, and the PNS does not learn any information about the banks&#39; dataset but only learns prediction labels. We also develop and analyze a DP mechanism to protect output privacy during inference. Our solution generates high-utility models by significantly reducing the per-bank noise level while satisfying distributed DP. To ensure high accuracy, our approach produces an ensemble model, in particular, a random forest. This enables us to take advantage of the well-known properties of ensembles to reduce variance and increase accuracy. Our solution won second prize in the first phase of the U.S. Privacy Enhancing Technologies (PETs) Prize Challenge. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.19304v1-abstract-full').style.display = 'none'; document.getElementById('2310.19304v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Prize Winner in the U.S. 
Privacy Enhancing Technologies (PETs) Prize Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.17116">arXiv:2310.17116</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.17116">pdf</a>, <a href="https://arxiv.org/format/2310.17116">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> Real-time Neonatal Chest Sound Separation using Deep Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Poh%2C+Y+Y">Yang Yi Poh</a>, <a href="/search/cs?searchtype=author&amp;query=Grooby%2C+E">Ethan Grooby</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+K">Kenneth Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Lindsay Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Arrabella King</a>, <a href="/search/cs?searchtype=author&amp;query=Ramanathan%2C+A">Ashwin Ramanathan</a>, <a href="/search/cs?searchtype=author&amp;query=Malhotra%2C+A">Atul Malhotra</a>, <a href="/search/cs?searchtype=author&amp;query=Harandi%2C+M">Mehrtash Harandi</a>, <a href="/search/cs?searchtype=author&amp;query=Marzbanrad%2C+F">Faezeh Marzbanrad</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.17116v1-abstract-short" style="display: inline;"> Auscultation for neonates is a simple and non-invasive method of providing diagnosis for cardiovascular and respiratory disease. Such diagnosis often requires high-quality heart and lung sounds to be captured during auscultation. 
However, in most cases, obtaining such high-quality sounds is non-trivial due to the chest sounds containing a mixture of heart, lung, and noise sounds. As such, addition&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.17116v1-abstract-full').style.display = 'inline'; document.getElementById('2310.17116v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.17116v1-abstract-full" style="display: none;"> Auscultation for neonates is a simple and non-invasive method of providing diagnosis for cardiovascular and respiratory disease. Such diagnosis often requires high-quality heart and lung sounds to be captured during auscultation. However, in most cases, obtaining such high-quality sounds is non-trivial due to the chest sounds containing a mixture of heart, lung, and noise sounds. As such, additional preprocessing is needed to separate the chest sounds into heart and lung sounds. This paper proposes a novel deep-learning approach to separate such chest sounds into heart and lung sounds. Inspired by the Conv-TasNet model, the proposed model has an encoder, decoder, and mask generator. The encoder consists of a 1D convolution model and the decoder consists of a transposed 1D convolution. The mask generator is constructed using stacked 1D convolutions and transformers. The proposed model outperforms previous methods in terms of objective distortion measures by 2.01 dB to 5.06 dB in the artificial dataset, as well as computation time, with at least a 17-time improvement. Therefore, our proposed model could be a suitable preprocessing step for any phonocardiogram-based health monitoring system. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.17116v1-abstract-full').style.display = 'none'; document.getElementById('2310.17116v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.13810">arXiv:2310.13810</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.13810">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A Better Match for Drivers and Riders: Reinforcement Learning at Lyft </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Azagirre%2C+X">Xabi Azagirre</a>, <a href="/search/cs?searchtype=author&amp;query=Balwally%2C+A">Akshay Balwally</a>, <a href="/search/cs?searchtype=author&amp;query=Candeli%2C+G">Guillaume Candeli</a>, <a href="/search/cs?searchtype=author&amp;query=Chamandy%2C+N">Nicholas Chamandy</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+B">Benjamin Han</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Alona King</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+H">Hyungjun Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Loncaric%2C+M">Martin Loncaric</a>, <a href="/search/cs?searchtype=author&amp;query=Martin%2C+S">Sebastien Martin</a>, <a href="/search/cs?searchtype=author&amp;query=Narasiman%2C+V">Vijay Narasiman</a>, <a href="/search/cs?searchtype=author&amp;query=Zhiwei"> Zhiwei</a>, <a href="/search/cs?searchtype=author&amp;query=Qin"> Qin</a>, <a 
href="/search/cs?searchtype=author&amp;query=Richard%2C+B">Baptiste Richard</a>, <a href="/search/cs?searchtype=author&amp;query=Smoot%2C+S">Sara Smoot</a>, <a href="/search/cs?searchtype=author&amp;query=Taylor%2C+S">Sean Taylor</a>, <a href="/search/cs?searchtype=author&amp;query=van+Ryzin%2C+G">Garrett van Ryzin</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+D">Di Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+F">Fei Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zamoshchin%2C+A">Alex Zamoshchin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.13810v2-abstract-short" style="display: inline;"> To better match drivers to riders in our ridesharing application, we revised Lyft&#39;s core matching algorithm. We use a novel online reinforcement learning approach that estimates the future earnings of drivers in real time and use this information to find more efficient matches. This change was the first documented implementation of a ridesharing matching algorithm that can learn and improve in rea&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.13810v2-abstract-full').style.display = 'inline'; document.getElementById('2310.13810v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.13810v2-abstract-full" style="display: none;"> To better match drivers to riders in our ridesharing application, we revised Lyft&#39;s core matching algorithm. We use a novel online reinforcement learning approach that estimates the future earnings of drivers in real time and use this information to find more efficient matches. This change was the first documented implementation of a ridesharing matching algorithm that can learn and improve in real time. 
We evaluated the new approach during weeks of switchback experimentation in most Lyft markets, and estimated how it benefited drivers, riders, and the platform. In particular, it enabled our drivers to serve millions of additional riders each year, leading to more than $30 million per year in incremental revenue. Lyft rolled out the algorithm globally in 2021. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.13810v2-abstract-full').style.display = 'none'; document.getElementById('2310.13810v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.17197">arXiv:2309.17197</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.17197">pdf</a>, <a href="https://arxiv.org/ps/2309.17197">ps</a>, <a href="https://arxiv.org/format/2309.17197">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> An Investigation Into Race Bias in Random Forest Models Based on Breast DCE-MRI Derived Radiomics Features </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huti%2C+M">Mohamed Huti</a>, <a 
href="/search/cs?searchtype=author&amp;query=Lee%2C+T">Tiarna Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Sawyer%2C+E">Elinor Sawyer</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.17197v1-abstract-short" style="display: inline;"> Recent research has shown that artificial intelligence (AI) models can exhibit bias in performance when trained using data that are imbalanced by protected attribute(s). Most work to date has focused on deep learning models, but classical AI techniques that make use of hand-crafted features may also be susceptible to such bias. In this paper we investigate the potential for race bias in random for&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.17197v1-abstract-full').style.display = 'inline'; document.getElementById('2309.17197v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.17197v1-abstract-full" style="display: none;"> Recent research has shown that artificial intelligence (AI) models can exhibit bias in performance when trained using data that are imbalanced by protected attribute(s). Most work to date has focused on deep learning models, but classical AI techniques that make use of hand-crafted features may also be susceptible to such bias. In this paper we investigate the potential for race bias in random forest (RF) models trained using radiomics features. Our application is prediction of tumour molecular subtype from dynamic contrast enhanced magnetic resonance imaging (DCE-MRI) of breast cancer patients. 
Our results show that radiomics features derived from DCE-MRI data do contain race-identifiable information, and that RF models can be trained to predict White and Black race from these data with 60-70% accuracy, depending on the subset of features used. Furthermore, RF models trained to predict tumour molecular subtype using race-imbalanced data seem to produce biased behaviour, exhibiting better performance on test data from the race on which they were trained. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.17197v1-abstract-full').style.display = 'none'; document.getElementById('2309.17197v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at the MICCAI Workshop on Fairness of AI in Medical Imaging (FAIMI) 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.15141">arXiv:2308.15141</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.15141">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light 
is-size-7"><a class="" href="https://doi.org/10.1016/j.media.2023.102861">10.1016/j.media.2023.102861 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Uncertainty Aware Training to Improve Deep Learning Model Calibration for Classification of Cardiac MR Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dawood%2C+T">Tareen Dawood</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Sidhua%2C+B+S">Baldeep S. Sidhua</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Goulda%2C+J">Justin Goulda</a>, <a href="/search/cs?searchtype=author&amp;query=Porter%2C+B">Bradley Porter</a>, <a href="/search/cs?searchtype=author&amp;query=Elliott%2C+M+K">Mark K. Elliott</a>, <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+V">Vishal Mehta</a>, <a href="/search/cs?searchtype=author&amp;query=Rinaldi%2C+C+A">Christopher A. Rinaldi</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Anton%2C+E">Esther Puyol-Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.15141v1-abstract-short" style="display: inline;"> Quantifying uncertainty of predictions has been identified as one way to develop more trustworthy artificial intelligence (AI) models beyond conventional reporting of performance metrics. When considering their role in a clinical decision support setting, AI classification models should ideally avoid confident wrong predictions and maximise the confidence of correct predictions. 
Models that do thi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.15141v1-abstract-full').style.display = 'inline'; document.getElementById('2308.15141v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.15141v1-abstract-full" style="display: none;"> Quantifying uncertainty of predictions has been identified as one way to develop more trustworthy artificial intelligence (AI) models beyond conventional reporting of performance metrics. When considering their role in a clinical decision support setting, AI classification models should ideally avoid confident wrong predictions and maximise the confidence of correct predictions. Models that do this are said to be well-calibrated with regard to confidence. However, relatively little attention has been paid to how to improve calibration when training these models, i.e., to make the training strategy uncertainty-aware. In this work we evaluate three novel uncertainty-aware training strategies comparing against two state-of-the-art approaches. We analyse performance on two different clinical applications: cardiac resynchronisation therapy (CRT) response prediction and coronary artery disease (CAD) diagnosis from cardiac magnetic resonance (CMR) images. The best-performing model in terms of both classification accuracy and the most common calibration measure, expected calibration error (ECE) was the Confidence Weight method, a novel approach that weights the loss of samples to explicitly penalise confident incorrect predictions. The method reduced the ECE by 17% for CRT response prediction and by 22% for CAD diagnosis when compared to a baseline classifier in which no uncertainty-aware strategy was included. In both applications, as well as reducing the ECE there was a slight increase in accuracy from 69% to 70% and 70% to 72% for CRT response prediction and CAD diagnosis respectively. 
However, our analysis showed a lack of consistency in terms of optimal models when using different calibration measures. This indicates the need for careful consideration of performance metrics when training and selecting models for complex high-risk applications in healthcare. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.15141v1-abstract-full').style.display = 'none'; document.getElementById('2308.15141v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.13861">arXiv:2308.13861</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.13861">pdf</a>, <a href="https://arxiv.org/format/2308.13861">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Bias in Unsupervised Anomaly Detection in Brain MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bercea%2C+C+I">Cosmin I. 
Bercea</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.13861v1-abstract-short" style="display: inline;"> Unsupervised anomaly detection methods offer a promising and flexible alternative to supervised approaches, holding the potential to revolutionize medical scan analysis and enhance diagnostic performance. In the current landscape, it is commonly assumed that differences between a test case and the training distribution are attributed solely to pathological conditions, implying that any disparity&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.13861v1-abstract-full').style.display = 'inline'; document.getElementById('2308.13861v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.13861v1-abstract-full" style="display: none;"> Unsupervised anomaly detection methods offer a promising and flexible alternative to supervised approaches, holding the potential to revolutionize medical scan analysis and enhance diagnostic performance. In the current landscape, it is commonly assumed that differences between a test case and the training distribution are attributed solely to pathological conditions, implying that any disparity indicates an anomaly. However, the presence of other potential sources of distributional shift, including scanner, age, sex, or race, is frequently overlooked. 
These shifts can significantly impact the accuracy of the anomaly detection task. Prominent instances of such failures have sparked concerns regarding the bias, credibility, and fairness of anomaly detection. This work presents a novel analysis of biases in unsupervised anomaly detection. By examining potential non-pathological distributional shifts between the training and testing distributions, we shed light on the extent of these biases and their influence on anomaly detection results. Moreover, this study examines the algorithmic limitations that arise due to biases, providing valuable insights into the challenges encountered by anomaly detection algorithms in accurately learning and capturing the entire range of variability present in the normative distribution. Through this analysis, we aim to enhance the understanding of these biases and pave the way for future improvements in the field. Here, we specifically investigate Alzheimer&#39;s disease detection from brain MR imaging as a case study, revealing significant biases related to sex, race, and scanner variations that substantially impact the results. These findings align with the broader goal of improving the reliability, fairness, and effectiveness of anomaly detection in medical imaging. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.13861v1-abstract-full').style.display = 'none'; document.getElementById('2308.13861v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.13415">arXiv:2308.13415</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.13415">pdf</a>, <a href="https://arxiv.org/format/2308.13415">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> An investigation into the impact of deep learning model choice on sex and race bias in cardiac MR segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lee%2C+T">Tiarna Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Aitcheson%2C+K">Keana Aitcheson</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+M">Miaojing Shi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.13415v1-abstract-short" style="display: inline;"> In medical imaging, artificial intelligence (AI) is increasingly being used to automate routine tasks. However, these algorithms can exhibit and exacerbate biases which lead to disparate performances between protected groups. 
We investigate the impact of model choice on how imbalances in subject sex and race in training datasets affect AI-based cine cardiac magnetic resonance image segmentation. W&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.13415v1-abstract-full').style.display = 'inline'; document.getElementById('2308.13415v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.13415v1-abstract-full" style="display: none;"> In medical imaging, artificial intelligence (AI) is increasingly being used to automate routine tasks. However, these algorithms can exhibit and exacerbate biases which lead to disparate performances between protected groups. We investigate the impact of model choice on how imbalances in subject sex and race in training datasets affect AI-based cine cardiac magnetic resonance image segmentation. We evaluate three convolutional neural network-based models and one vision transformer model. We find significant sex bias in three of the four models and racial bias in all of the models. However, the severity and nature of the bias varies between the models, highlighting the importance of model choice when attempting to train fair AI-based segmentation models for medical imaging tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.13415v1-abstract-full').style.display = 'none'; document.getElementById('2308.13415v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.08038">arXiv:2308.08038</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.08038">pdf</a>, <a href="https://arxiv.org/format/2308.08038">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning Framework for Spleen Volume Estimation from 2D Cross-sectional Views </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+Z">Zhen Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Anton%2C+E">Esther Puyol-Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Jogeesvaran%2C+H">Haran Jogeesvaran</a>, <a href="/search/cs?searchtype=author&amp;query=Inusa%2C+B">Baba Inusa</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.08038v2-abstract-short" style="display: inline;"> Abnormal spleen enlargement (splenomegaly) is regarded as a clinical indicator for a range of conditions, including liver disease, cancer and blood diseases. While spleen length measured from ultrasound images is a commonly used surrogate for spleen size, spleen volume remains the gold standard metric for assessing splenomegaly and the severity of related clinical conditions. 
Computed tomography i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08038v2-abstract-full').style.display = 'inline'; document.getElementById('2308.08038v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.08038v2-abstract-full" style="display: none;"> Abnormal spleen enlargement (splenomegaly) is regarded as a clinical indicator for a range of conditions, including liver disease, cancer and blood diseases. While spleen length measured from ultrasound images is a commonly used surrogate for spleen size, spleen volume remains the gold standard metric for assessing splenomegaly and the severity of related clinical conditions. Computed tomography is the main imaging modality for measuring spleen volume, but it is less accessible in areas where there is a high prevalence of splenomegaly (e.g., the Global South). Our objective was to enable automated spleen volume measurement from 2D cross-sectional segmentations, which can be obtained from ultrasound imaging. In this study, we describe a variational autoencoder-based framework to measure spleen volume from single- or dual-view 2D spleen segmentations. We propose and evaluate three volume estimation methods within this framework. We also demonstrate how 95% confidence intervals of volume estimates can be produced to make our method more clinically useful. Our best model achieved mean relative volume accuracies of 86.62% and 92.58% for single- and dual-view segmentations, respectively, surpassing the performance of the clinical standard approach of linear regression using manual measurements and a comparative deep learning-based 2D-3D reconstruction-based approach. The proposed spleen volume estimation framework can be integrated into standard clinical workflows which currently use 2D ultrasound images to measure spleen length. 
To the best of our knowledge, this is the first work to achieve direct 3D spleen volume estimation from 2D spleen segmentations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08038v2-abstract-full').style.display = 'none'; document.getElementById('2308.08038v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.12688">arXiv:2307.12688</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.12688">pdf</a>, <a href="https://arxiv.org/ps/2307.12688">ps</a>, <a href="https://arxiv.org/format/2307.12688">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-031-35361-1_12">10.1007/978-3-031-35361-1_12 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Safe asynchronous mixed-choice for timed interactions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pears%2C+J">Jonah Pears</a>, <a 
href="/search/cs?searchtype=author&amp;query=Bocchi%2C+L">Laura Bocchi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Andy King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.12688v1-abstract-short" style="display: inline;"> Mixed-choice has long been barred from models of asynchronous communication since it compromises key properties of communicating finite-state machines. Session types inherit this restriction, which precludes them from fully modelling timeouts -- a key programming feature to handle failures. To address this deficiency, we present (binary) TimeOut Asynchronous Session Types ({TOAST}) as an extension&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.12688v1-abstract-full').style.display = 'inline'; document.getElementById('2307.12688v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.12688v1-abstract-full" style="display: none;"> Mixed-choice has long been barred from models of asynchronous communication since it compromises key properties of communicating finite-state machines. Session types inherit this restriction, which precludes them from fully modelling timeouts -- a key programming feature to handle failures. To address this deficiency, we present (binary) TimeOut Asynchronous Session Types ({TOAST}) as an extension to (binary) asynchronous timed session types to permit mixed-choice. {TOAST} deploy timing constraints to regulate the use of mixed-choice so as to preserve communication safety. We provide a new behavioural semantics for {TOAST} which guarantees progress in the presence of mixed-choice. 
Building upon {TOAST}, we provide a calculus featuring process timers which is capable of modelling timeouts using a $\mathtt{receive\text{-}after}$ pattern, much like Erlang, and informally illustrate the correspondence with TOAST specifications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.12688v1-abstract-full').style.display = 'none'; document.getElementById('2307.12688v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 8 figures, accepted at COORDINATION 2023</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> COORDINATION 2023: Lecture Notes in Computer Science, volume 13908, pages 214--231 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.04739">arXiv:2306.04739</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.04739">pdf</a>, <a href="https://arxiv.org/format/2306.04739">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Automatic retrieval of corresponding US views in longitudinal examinations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kerdegari%2C+H">Hamideh Kerdegari</a>, <a href="/search/cs?searchtype=author&amp;query=Phung1%2C+T+H+N">Tran Huy Nhat Phung1</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+V+H">Van Hao 
Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Truong%2C+T+P+T">Thi Phuong Thao Truong</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+N+M+T">Ngoc Minh Thu Le</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+T+P">Thanh Phuong Le</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+T+M+T">Thi Mai Thao Le</a>, <a href="/search/cs?searchtype=author&amp;query=Pisani%2C+L">Luigi Pisani</a>, <a href="/search/cs?searchtype=author&amp;query=Denehy%2C+L">Linda Denehy</a>, <a href="/search/cs?searchtype=author&amp;query=Consortium%2C+V">Vital Consortium</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=Thwaites%2C+L">Louise Thwaites</a>, <a href="/search/cs?searchtype=author&amp;query=Yacoub%2C+S">Sophie Yacoub</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.04739v1-abstract-short" style="display: inline;"> Skeletal muscle atrophy is a common occurrence in critically ill patients in the intensive care unit (ICU) who spend long periods in bed. Muscle mass must be recovered through physiotherapy before patient discharge and ultrasound imaging is frequently used to assess the recovery process by measuring the muscle size over time. 
However, these manual measurements are subject to large variability, par&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.04739v1-abstract-full').style.display = 'inline'; document.getElementById('2306.04739v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.04739v1-abstract-full" style="display: none;"> Skeletal muscle atrophy is a common occurrence in critically ill patients in the intensive care unit (ICU) who spend long periods in bed. Muscle mass must be recovered through physiotherapy before patient discharge and ultrasound imaging is frequently used to assess the recovery process by measuring the muscle size over time. However, these manual measurements are subject to large variability, particularly since the scans are typically acquired on different days and potentially by different operators. In this paper, we propose a self-supervised contrastive learning approach to automatically retrieve similar ultrasound muscle views at different scan times. Three different models were compared using data from 67 patients acquired in the ICU. Results indicate that our contrastive model outperformed a supervised baseline model in the task of view retrieval with an AUC of 73.52% and when combined with an automatic segmentation model achieved 5.7%+/-0.24% error in cross-sectional area. Furthermore, a user study survey confirmed the efficacy of our model for muscle view retrieval. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.04739v1-abstract-full').style.display = 'none'; document.getElementById('2306.04739v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.03391">arXiv:2305.03391</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.03391">pdf</a>, <a href="https://arxiv.org/format/2305.03391">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Compressing audio CNNs with graph centrality based filter pruning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=King%2C+J+A">James A King</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+A">Arshdeep Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Plumbley%2C+M+D">Mark D. Plumbley</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.03391v1-abstract-short" style="display: inline;"> Convolutional neural networks (CNNs) are commonplace in high-performing solutions to many real-world problems, such as audio classification. CNNs have many parameters and filters, with some having a larger impact on the performance than others. 
This means that networks may contain many unnecessary filters, increasing a CNN&#39;s computation and memory requirements while providing limited performance b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.03391v1-abstract-full').style.display = 'inline'; document.getElementById('2305.03391v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.03391v1-abstract-full" style="display: none;"> Convolutional neural networks (CNNs) are commonplace in high-performing solutions to many real-world problems, such as audio classification. CNNs have many parameters and filters, with some having a larger impact on the performance than others. This means that networks may contain many unnecessary filters, increasing a CNN&#39;s computation and memory requirements while providing limited performance benefits. To make CNNs more efficient, we propose a pruning framework that eliminates filters with the highest &#34;commonality&#34;. We measure this commonality using the graph-theoretic concept of &#34;centrality&#34;. We hypothesise that a filter with a high centrality should be eliminated as it represents commonality and can be replaced by other filters without affecting the performance of a network much. An experimental evaluation of the proposed framework is performed on acoustic scene classification and audio tagging. On the DCASE 2021 Task 1A baseline network, our proposed method reduces computations per inference by 71% with 50% fewer parameters at less than a two percentage point drop in accuracy compared to the original network. For large-scale CNNs such as PANNs designed for audio tagging, our method reduces 24% computations per inference with 41% fewer parameters at a slight improvement in performance. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.03391v1-abstract-full').style.display = 'none'; document.getElementById('2305.03391v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.14510">arXiv:2212.14510</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2212.14510">pdf</a>, <a href="https://arxiv.org/format/2212.14510">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> A Machine Learning Case Study for AI-empowered echocardiography of Intensive Care Unit Patients in low- and middle-income countries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xochicale%2C+M">Miguel Xochicale</a>, <a href="/search/cs?searchtype=author&amp;query=Thwaites%2C+L">Louise Thwaites</a>, <a href="/search/cs?searchtype=author&amp;query=Yacoub%2C+S">Sophie Yacoub</a>, <a href="/search/cs?searchtype=author&amp;query=Pisani%2C+L">Luigi Pisani</a>, <a href="/search/cs?searchtype=author&amp;query=Tran-Huy%2C+P">Phung-Nhat Tran-Huy</a>, <a href="/search/cs?searchtype=author&amp;query=Kerdegari%2C+H">Hamideh Kerdegari</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Andrew King</a>, <a 
href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.14510v2-abstract-short" style="display: inline;"> We present a Machine Learning (ML) study case to illustrate the challenges of clinical translation for a real-time AI-empowered echocardiography system with data of ICU patients in LMICs. Such ML case study includes data preparation, curation and labelling from 2D Ultrasound videos of 31 ICU patients in LMICs and model selection, validation and deployment of three thinner neural networks to classi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.14510v2-abstract-full').style.display = 'inline'; document.getElementById('2212.14510v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.14510v2-abstract-full" style="display: none;"> We present a Machine Learning (ML) study case to illustrate the challenges of clinical translation for a real-time AI-empowered echocardiography system with data of ICU patients in LMICs. Such ML case study includes data preparation, curation and labelling from 2D Ultrasound videos of 31 ICU patients in LMICs and model selection, validation and deployment of three thinner neural networks to classify apical four-chamber view. Results of the ML heuristics showed the promising implementation, validation and application of thinner networks to classify 4CV with limited datasets. We conclude this work mentioning the need for (a) datasets to improve diversity of demographics, diseases, and (b) the need of further investigations of thinner models to be run and implemented in low-cost hardware to be clinically translated in the ICU in LMICs. 
The code and other resources to reproduce this work are available at https://github.com/vital-ultrasound/ai-assisted-echocardiography-for-low-resource-countries. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.14510v2-abstract-full').style.display = 'none'; document.getElementById('2212.14510v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.14212">arXiv:2209.14212</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.14212">pdf</a>, <a href="https://arxiv.org/format/2209.14212">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Automated Quality Controlled Analysis of 2D Phase Contrast Cardiovascular Magnetic Resonance Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chan%2C+E">Emily Chan</a>, <a href="/search/cs?searchtype=author&amp;query=O%27Hanlon%2C+C">Ciaran O&#39;Hanlon</a>, <a href="/search/cs?searchtype=author&amp;query=Marquez%2C+C+A">Carlota Asegurado Marquez</a>, <a href="/search/cs?searchtype=author&amp;query=Petalcorin%2C+M">Marwenie Petalcorin</a>, <a href="/search/cs?searchtype=author&amp;query=Mariscal-Harana%2C+J">Jorge Mariscal-Harana</a>, <a 
href="/search/cs?searchtype=author&amp;query=Gu%2C+H">Haotian Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+R+J">Raymond J. Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Judd%2C+R+M">Robert M. Judd</a>, <a href="/search/cs?searchtype=author&amp;query=Chowienczyk%2C+P">Phil Chowienczyk</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.14212v1-abstract-short" style="display: inline;"> Flow analysis carried out using phase contrast cardiac magnetic resonance imaging (PC-CMR) enables the quantification of important parameters that are used in the assessment of cardiovascular function. An essential part of this analysis is the identification of the correct CMR views and quality control (QC) to detect artefacts that could affect the flow quantification. We propose a novel deep lear&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.14212v1-abstract-full').style.display = 'inline'; document.getElementById('2209.14212v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.14212v1-abstract-full" style="display: none;"> Flow analysis carried out using phase contrast cardiac magnetic resonance imaging (PC-CMR) enables the quantification of important parameters that are used in the assessment of cardiovascular function. 
An essential part of this analysis is the identification of the correct CMR views and quality control (QC) to detect artefacts that could affect the flow quantification. We propose a novel deep learning based framework for the fully-automated analysis of flow from full CMR scans that first carries out these view selection and QC steps using two sequential convolutional neural networks, followed by automatic aorta and pulmonary artery segmentation to enable the quantification of key flow parameters. Accuracy values of 0.958 and 0.914 were obtained for view classification and QC, respectively. For segmentation, Dice scores were $&gt;$0.969 and the Bland-Altman plots indicated excellent agreement between manual and automatic peak flow values. In addition, we tested our pipeline on an external validation data set, with results indicating good robustness of the pipeline. This work was carried out using multivendor clinical data consisting of 986 cases, indicating the potential for the use of this pipeline in a clinical setting. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.14212v1-abstract-full').style.display = 'none'; document.getElementById('2209.14212v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">STACOM 2022 workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.01627">arXiv:2209.01627</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.01627">pdf</a>, <a href="https://arxiv.org/format/2209.01627">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A systematic study of race and sex bias in CNN-based cardiac MR segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lee%2C+T">Tiarna Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Anton%2C+E">Esther Puyol-Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+M">Miaojing Shi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.01627v1-abstract-short" style="display: inline;"> In computer vision there has been significant research interest in assessing potential demographic bias in deep learning models. One of the main causes of such bias is imbalance in the training data. In medical imaging, where the potential impact of bias is arguably much greater, there has been less interest. 
In medical imaging pipelines, segmentation of structures of interest plays an important r&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.01627v1-abstract-full').style.display = 'inline'; document.getElementById('2209.01627v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.01627v1-abstract-full" style="display: none;"> In computer vision there has been significant research interest in assessing potential demographic bias in deep learning models. One of the main causes of such bias is imbalance in the training data. In medical imaging, where the potential impact of bias is arguably much greater, there has been less interest. In medical imaging pipelines, segmentation of structures of interest plays an important role in estimating clinical biomarkers that are subsequently used to inform patient management. Convolutional neural networks (CNNs) are starting to be used to automate this process. We present the first systematic study of the impact of training set imbalance on race and sex bias in CNN-based segmentation. We focus on segmentation of the structures of the heart from short axis cine cardiac magnetic resonance images, and train multiple CNN segmentation models with different levels of race/sex imbalance. We find no significant bias in the sex experiment but significant bias in two separate race experiments, highlighting the need to consider adequate representation of different demographic groups in health datasets. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.01627v1-abstract-full').style.display = 'none'; document.getElementById('2209.01627v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.06613">arXiv:2208.06613</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2208.06613">pdf</a>, <a href="https://arxiv.org/ps/2208.06613">ps</a>, <a href="https://arxiv.org/format/2208.06613">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Study of Demographic Bias in CNN-based Brain MR Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ioannou%2C+S">Stefanos Ioannou</a>, <a href="/search/cs?searchtype=author&amp;query=Chockler%2C+H">Hana Chockler</a>, <a href="/search/cs?searchtype=author&amp;query=Hammers%2C+A">Alexander Hammers</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.06613v1-abstract-short" style="display: inline;"> Convolutional neural networks (CNNs) are increasingly being used to automate the segmentation of brain structures in magnetic resonance (MR) images for research studies. 
In other applications, CNN models have been shown to exhibit bias against certain demographic groups when they are under-represented in the training sets. In this work, we investigate whether CNN models for brain MR segmentation h&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.06613v1-abstract-full').style.display = 'inline'; document.getElementById('2208.06613v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.06613v1-abstract-full" style="display: none;"> Convolutional neural networks (CNNs) are increasingly being used to automate the segmentation of brain structures in magnetic resonance (MR) images for research studies. In other applications, CNN models have been shown to exhibit bias against certain demographic groups when they are under-represented in the training sets. In this work, we investigate whether CNN models for brain MR segmentation have the potential to contain sex or race bias when trained with imbalanced training sets. We train multiple instances of the FastSurferCNN model using different levels of sex imbalance in white subjects. We evaluate the performance of these models separately for white male and white female test sets to assess sex bias, and furthermore evaluate them on black male and black female test sets to assess potential racial bias. We find significant sex and race bias effects in segmentation model performance. The biases have a strong spatial component, with some brain regions exhibiting much stronger bias than others. Overall, our results suggest that race bias is more significant than sex bias. Our study demonstrates the importance of considering race and sex balance when forming training sets for CNN-based brain MR segmentation, to avoid maintaining or even exacerbating existing health inequalities through biased research study findings. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.06613v1-abstract-full').style.display = 'none'; document.getElementById('2208.06613v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at MICCAI MLCN 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.03305">arXiv:2208.03305</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2208.03305">pdf</a>, <a href="https://arxiv.org/format/2208.03305">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning-based Segmentation of Pleural Effusion From Ultrasound Using Coordinate Convolutions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Morilhat%2C+G">Germain Morilhat</a>, <a href="/search/cs?searchtype=author&amp;query=Kifle%2C+N">Naomi Kifle</a>, <a href="/search/cs?searchtype=author&amp;query=FinesilverSmith%2C+S">Sandra FinesilverSmith</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Vergani%2C+V">Vittoria Vergani</a>, <a href="/search/cs?searchtype=author&amp;query=Desita%2C+H+T">Habtamu Tegegne Desita</a>, <a 
href="/search/cs?searchtype=author&amp;query=Desita%2C+Z+T">Zerubabel Tegegne Desita</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Anton%2C+E">Esther Puyol-Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Carass%2C+A">Aaron Carass</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.03305v1-abstract-short" style="display: inline;"> In many low-to-middle income (LMIC) countries, ultrasound is used for assessment of pleural effusion. Typically, the extent of the effusion is manually measured by a sonographer, leading to significant intra-/inter-observer variability. In this work, we investigate the use of deep learning (DL) to automate the process of pleural effusion segmentation from ultrasound images. On two datasets acquire&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.03305v1-abstract-full').style.display = 'inline'; document.getElementById('2208.03305v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.03305v1-abstract-full" style="display: none;"> In many low-to-middle income (LMIC) countries, ultrasound is used for assessment of pleural effusion. Typically, the extent of the effusion is manually measured by a sonographer, leading to significant intra-/inter-observer variability. In this work, we investigate the use of deep learning (DL) to automate the process of pleural effusion segmentation from ultrasound images. On two datasets acquired in a LMIC setting, we achieve median Dice Similarity Coefficients (DSCs) of 0.82 and 0.74 respectively using the nnU-net DL model. 
We also investigate the use of coordinate convolutions in the DL model and find that this results in a statistically significant improvement in the median DSC on the first dataset to 0.85, with no significant change on the second dataset. This work showcases, for the first time, the potential of DL in automating the process of effusion assessment from ultrasound in LMIC settings where there is often a lack of experienced radiologists to perform such tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.03305v1-abstract-full').style.display = 'none'; document.getElementById('2208.03305v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper has been accepted for publication at the MICCAI FAIR workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.01555">arXiv:2208.01555</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2208.01555">pdf</a>, <a href="https://arxiv.org/format/2208.01555">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> Low-complexity CNNs for Acoustic Scene Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Singh%2C+A">Arshdeep Singh</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+J+A">James A King</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xubo Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Wenwu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Plumbley%2C+M+D">Mark D. Plumbley</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.01555v1-abstract-short" style="display: inline;"> This technical report describes the SurreyAudioTeam22s submission for DCASE 2022 ASC Task 1, Low-Complexity Acoustic Scene Classification (ASC). The task has two rules, (a) the ASC framework should have maximum 128K parameters, and (b) there should be a maximum of 30 millions multiply-accumulate operations (MACs) per inference. In this report, we present low-complexity systems for ASC that follow&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.01555v1-abstract-full').style.display = 'inline'; document.getElementById('2208.01555v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.01555v1-abstract-full" style="display: none;"> This technical report describes the SurreyAudioTeam22s submission for DCASE 2022 ASC Task 1, Low-Complexity Acoustic Scene Classification (ASC). The task has two rules, (a) the ASC framework should have maximum 128K parameters, and (b) there should be a maximum of 30 millions multiply-accumulate operations (MACs) per inference. In this report, we present low-complexity systems for ASC that follow the rules intended for the task. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.01555v1-abstract-full').style.display = 'none'; document.getElementById('2208.01555v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Technical Report DCASE 2022 TASK 1. arXiv admin note: substantial text overlap with arXiv:2207.11529</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.08137">arXiv:2206.08137</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.08137">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> An AI tool for automated analysis of large-scale unstructured clinical cine CMR databases </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mariscal-Harana%2C+J">Jorge Mariscal-Harana</a>, <a href="/search/cs?searchtype=author&amp;query=Asher%2C+C">Clint Asher</a>, <a href="/search/cs?searchtype=author&amp;query=Vergani%2C+V">Vittoria Vergani</a>, <a href="/search/cs?searchtype=author&amp;query=Rizvi%2C+M">Maleeha Rizvi</a>, <a href="/search/cs?searchtype=author&amp;query=Keehn%2C+L">Louise Keehn</a>, <a 
href="/search/cs?searchtype=author&amp;query=Kim%2C+R+J">Raymond J. Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Judd%2C+R+M">Robert M. Judd</a>, <a href="/search/cs?searchtype=author&amp;query=Petersen%2C+S+E">Steffen E. Petersen</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Andrew King</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.08137v2-abstract-short" style="display: inline;"> Artificial intelligence (AI) techniques have been proposed for automating analysis of short axis (SAX) cine cardiac magnetic resonance (CMR), but no CMR analysis tool exists to automatically analyse large (unstructured) clinical CMR datasets. We develop and validate a robust AI tool for start-to-end automatic quantification of cardiac function from SAX cine CMR in large clinical databases. Our pip&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.08137v2-abstract-full').style.display = 'inline'; document.getElementById('2206.08137v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.08137v2-abstract-full" style="display: none;"> Artificial intelligence (AI) techniques have been proposed for automating analysis of short axis (SAX) cine cardiac magnetic resonance (CMR), but no CMR analysis tool exists to automatically analyse large (unstructured) clinical CMR datasets. We develop and validate a robust AI tool for start-to-end automatic quantification of cardiac function from SAX cine CMR in large clinical databases. 
Our pipeline for processing and analysing CMR databases includes automated steps to identify the correct data, robust image pre-processing, an AI algorithm for biventricular segmentation of SAX CMR and estimation of functional biomarkers, and automated post-analysis quality control to detect and correct errors. The segmentation algorithm was trained on 2793 CMR scans from two NHS hospitals and validated on additional cases from this dataset (n=414) and five external datasets (n=6888), including scans of patients with a range of diseases acquired at 12 different centres using CMR scanners from all major vendors. Median absolute errors in cardiac biomarkers were within the range of inter-observer variability: &lt;8.4mL (left ventricle volume), &lt;9.2mL (right ventricle volume), &lt;13.3g (left ventricular mass), and &lt;5.9% (ejection fraction) across all datasets. Stratification of cases according to phenotypes of cardiac disease and scanner vendors showed good performance across all groups. We show that our proposed tool, which combines image pre-processing steps, a domain-generalisable AI algorithm trained on a large-scale multi-domain CMR dataset and quality control steps, allows robust analysis of (clinical or research) databases from multiple centres, vendors, and cardiac diseases. This enables translation of our tool for use in fully-automated processing of large multi-centre databases. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.08137v2-abstract-full').style.display = 'none'; document.getElementById('2206.08137v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at EHJ Digital Health; Bram Ruijsink and Esther Puyol-Antón are shared last authors</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.15286">arXiv:2205.15286</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.15286">pdf</a>, <a href="https://arxiv.org/format/2205.15286">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> Robust and accelerated single-spike spiking neural network training with applicability to challenging temporal tasks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Taylor%2C+L">Luke Taylor</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Andrew King</a>, <a href="/search/cs?searchtype=author&amp;query=Harper%2C+N">Nicol Harper</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.15286v2-abstract-short" style="display: inline;"> Spiking neural networks (SNNs), particularly the single-spike variant in which neurons spike at most once, are considerably more energy efficient than standard artificial neural networks (ANNs). However, single-spike SSNs are difficult to train due to their dynamic and non-differentiable nature, where current solutions are either slow or suffer from training instabilities. 
These networks have also&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.15286v2-abstract-full').style.display = 'inline'; document.getElementById('2205.15286v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.15286v2-abstract-full" style="display: none;"> Spiking neural networks (SNNs), particularly the single-spike variant in which neurons spike at most once, are considerably more energy efficient than standard artificial neural networks (ANNs). However, single-spike SSNs are difficult to train due to their dynamic and non-differentiable nature, where current solutions are either slow or suffer from training instabilities. These networks have also been critiqued for their limited computational applicability such as being unsuitable for time-series datasets. We propose a new model for training single-spike SNNs which mitigates the aforementioned training issues and obtains competitive results across various image and neuromorphic datasets, with up to a $13.98\times$ training speedup and up to an $81\%$ reduction in spikes compared to the multi-spike SNN. Notably, our model performs on par with multi-spike SNNs in challenging tasks involving neuromorphic time-series datasets, demonstrating a broader computational role for single-spike SNNs than previously believed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.15286v2-abstract-full').style.display = 'none'; document.getElementById('2205.15286v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 6 figures, under review at ICLR 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.01673">arXiv:2205.01673</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.01673">pdf</a>, <a href="https://arxiv.org/format/2205.01673">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A Deep Learning-based Integrated Framework for Quality-aware Undersampled Cine Cardiac MRI Reconstruction and Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Machado%2C+I+P">Inês P. Machado</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a href="/search/cs?searchtype=author&amp;query=Cruz%2C+G">Gastão Cruz</a>, <a href="/search/cs?searchtype=author&amp;query=Ugurlu%2C+D">Devran Ugurlu</a>, <a href="/search/cs?searchtype=author&amp;query=Olakorede%2C+I">Ihsane Olakorede</a>, <a href="/search/cs?searchtype=author&amp;query=Oksuz%2C+I">Ilkay Oksuz</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Castelo-Branco%2C+M">Miguel Castelo-Branco</a>, <a href="/search/cs?searchtype=author&amp;query=Young%2C+A+A">Alistair A. 
Young</a>, <a href="/search/cs?searchtype=author&amp;query=Prieto%2C+C">Claudia Prieto</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.01673v1-abstract-short" style="display: inline;"> Cine cardiac magnetic resonance (CMR) imaging is considered the gold standard for cardiac function evaluation. However, cine CMR acquisition is inherently slow and in recent decades considerable effort has been put into accelerating scan times without compromising image quality or the accuracy of derived results. In this paper, we present a fully-automated, quality-controlled integrated framework&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.01673v1-abstract-full').style.display = 'inline'; document.getElementById('2205.01673v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.01673v1-abstract-full" style="display: none;"> Cine cardiac magnetic resonance (CMR) imaging is considered the gold standard for cardiac function evaluation. However, cine CMR acquisition is inherently slow and in recent decades considerable effort has been put into accelerating scan times without compromising image quality or the accuracy of derived results. In this paper, we present a fully-automated, quality-controlled integrated framework for reconstruction, segmentation and downstream analysis of undersampled cine CMR data. The framework enables active acquisition of radial k-space data, in which acquisition can be stopped as soon as acquired data are sufficient to produce high quality reconstructions and segmentations. 
This results in reduced scan times and automated analysis, enabling robust and accurate estimation of functional biomarkers. To demonstrate the feasibility of the proposed approach, we perform realistic simulations of radial k-space acquisitions on a dataset of subjects from the UK Biobank and present results on in-vivo cine CMR k-space data collected from healthy subjects. The results demonstrate that our method can produce quality-controlled images in a mean scan time reduced from 12 to 4 seconds per slice, and that image quality is sufficient to allow clinically relevant parameters to be automatically estimated to within 5% mean absolute difference. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.01673v1-abstract-full').style.display = 'none'; document.getElementById('2205.01673v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.11726">arXiv:2203.11726</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.11726">pdf</a>, <a href="https://arxiv.org/format/2203.11726">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> AI-enabled Assessment of Cardiac Systolic and Diastolic Function from Echocardiography </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Sidhu%2C+B+S">Baldeep S. Sidhu</a>, <a href="/search/cs?searchtype=author&amp;query=Gould%2C+J">Justin Gould</a>, <a href="/search/cs?searchtype=author&amp;query=Porter%2C+B">Bradley Porter</a>, <a href="/search/cs?searchtype=author&amp;query=Elliott%2C+M+K">Mark K. Elliott</a>, <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+V">Vishal Mehta</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+H">Haotian Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Xochicale%2C+M">Miguel Xochicale</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Rinaldi%2C+C+A">Christopher A. 
Rinaldi</a>, <a href="/search/cs?searchtype=author&amp;query=Cowie%2C+M">Martin Cowie</a>, <a href="/search/cs?searchtype=author&amp;query=Chowienczyk%2C+P">Phil Chowienczyk</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.11726v2-abstract-short" style="display: inline;"> Left ventricular (LV) function is an important factor in terms of patient management, outcome, and long-term survival of patients with heart disease. The most recently published clinical guidelines for heart failure recognise that over reliance on only one measure of cardiac function (LV ejection fraction) as a diagnostic and treatment stratification biomarker is suboptimal. Recent advances in AI-&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.11726v2-abstract-full').style.display = 'inline'; document.getElementById('2203.11726v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.11726v2-abstract-full" style="display: none;"> Left ventricular (LV) function is an important factor in terms of patient management, outcome, and long-term survival of patients with heart disease. The most recently published clinical guidelines for heart failure recognise that over reliance on only one measure of cardiac function (LV ejection fraction) as a diagnostic and treatment stratification biomarker is suboptimal. Recent advances in AI-based echocardiography analysis have shown excellent results on automated estimation of LV volumes and LV ejection fraction. 
However, from time-varying 2-D echocardiography acquisition, a richer description of cardiac function can be obtained by estimating functional biomarkers from the complete cardiac cycle. In this work we propose for the first time an AI approach for deriving advanced biomarkers of systolic and diastolic LV function from 2-D echocardiography based on segmentations of the full cardiac cycle. These biomarkers will allow clinicians to obtain a much richer picture of the heart in health and disease. The AI model is based on the &#39;nn-Unet&#39; framework and was trained and tested using four different databases. Results show excellent agreement between manual and automated analysis and showcase the potential of the advanced systolic and diastolic biomarkers for patient stratification. Finally, for a subset of 50 cases, we perform a correlation analysis between clinical biomarkers derived from echocardiography and CMR and we show excellent agreement between the two modalities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.11726v2-abstract-full').style.display = 'none'; document.getElementById('2203.11726v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> MICCAI ASMUS 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.03044">arXiv:2202.03044</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.03044">pdf</a>, <a href="https://arxiv.org/format/2202.03044">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3579368">10.1145/3579368 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Hybrid quantum annealing for larger-than-QPU lattice-structured problems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Raymond%2C+J">Jack Raymond</a>, <a href="/search/cs?searchtype=author&amp;query=Stevanovic%2C+R">Radomir Stevanovic</a>, <a href="/search/cs?searchtype=author&amp;query=Bernoudy%2C+W">William Bernoudy</a>, <a href="/search/cs?searchtype=author&amp;query=Boothby%2C+K">Kelly Boothby</a>, <a href="/search/cs?searchtype=author&amp;query=McGeoch%2C+C">Catherine McGeoch</a>, <a href="/search/cs?searchtype=author&amp;query=Berkley%2C+A+J">Andrew J. Berkley</a>, <a href="/search/cs?searchtype=author&amp;query=Farr%C3%A9%2C+P">Pau Farré</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+D">Andrew D. 
King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.03044v1-abstract-short" style="display: inline;"> Quantum processing units (QPUs) executing annealing algorithms have shown promise in optimization and simulation applications. Hybrid algorithms are a natural bridge to additional applications of larger scale. We present a straightforward and effective method for solving larger-than-QPU lattice-structured Ising optimization problems. Performance is compared against simulated annealing with promisi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.03044v1-abstract-full').style.display = 'inline'; document.getElementById('2202.03044v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.03044v1-abstract-full" style="display: none;"> Quantum processing units (QPUs) executing annealing algorithms have shown promise in optimization and simulation applications. Hybrid algorithms are a natural bridge to additional applications of larger scale. We present a straightforward and effective method for solving larger-than-QPU lattice-structured Ising optimization problems. Performance is compared against simulated annealing with promising results, and improvement is shown as a function of the generation of D-Wave QPU used. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.03044v1-abstract-full').style.display = 'none'; document.getElementById('2202.03044v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 15 figures, supplementary code attachment</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 2023. ACM Transactions on Quantum Computing </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.10105">arXiv:2201.10105</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.10105">pdf</a>, <a href="https://arxiv.org/format/2201.10105">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/EMBC48229.2022.9871449">10.1109/EMBC48229.2022.9871449 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Prediction of Neonatal Respiratory Distress in Term Babies at Birth from Digital Stethoscope Recorded Chest Sounds </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Grooby%2C+E">Ethan Grooby</a>, <a href="/search/cs?searchtype=author&amp;query=Sitaula%2C+C">Chiranjibi Sitaula</a>, <a 
href="/search/cs?searchtype=author&amp;query=Tan%2C+K">Kenneth Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Lindsay Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Arrabella King</a>, <a href="/search/cs?searchtype=author&amp;query=Ramanathan%2C+A">Ashwin Ramanathan</a>, <a href="/search/cs?searchtype=author&amp;query=Malhotra%2C+A">Atul Malhotra</a>, <a href="/search/cs?searchtype=author&amp;query=Dumont%2C+G+A">Guy A. Dumont</a>, <a href="/search/cs?searchtype=author&amp;query=Marzbanrad%2C+F">Faezeh Marzbanrad</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.10105v1-abstract-short" style="display: inline;"> Neonatal respiratory distress is a common condition that if left untreated, can lead to short- and long-term complications. This paper investigates the usage of digital stethoscope recorded chest sounds taken within 1min post-delivery, to enable early detection and prediction of neonatal respiratory distress. Fifty-one term newborns were included in this study, 9 of whom developed respiratory dist&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.10105v1-abstract-full').style.display = 'inline'; document.getElementById('2201.10105v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.10105v1-abstract-full" style="display: none;"> Neonatal respiratory distress is a common condition that if left untreated, can lead to short- and long-term complications. This paper investigates the usage of digital stethoscope recorded chest sounds taken within 1min post-delivery, to enable early detection and prediction of neonatal respiratory distress. Fifty-one term newborns were included in this study, 9 of whom developed respiratory distress. 
For each newborn, 1min anterior and posterior recordings were taken. These recordings were pre-processed to remove noisy segments and obtain high-quality heart and lung sounds. The random undersampling boosting (RUSBoost) classifier was then trained on a variety of features, such as power and vital sign features extracted from the heart and lung sounds. The RUSBoost algorithm produced specificity, sensitivity, and accuracy results of 85.0%, 66.7% and 81.8%, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.10105v1-abstract-full').style.display = 'none'; document.getElementById('2201.10105v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, 2 figures, 1 table. 
Paper submitted for potential publication as conference paper at 44th Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 2022</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 2022 44th Annual International Conference of the IEEE Engineering in Medicine &amp; Biology Society (EMBC) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.03211">arXiv:2201.03211</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.03211">pdf</a>, <a href="https://arxiv.org/format/2201.03211">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/JBHI.2022.3215995">10.1109/JBHI.2022.3215995 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Noisy Neonatal Chest Sound Separation for High-Quality Heart and Lung Sounds </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Grooby%2C+E">Ethan Grooby</a>, <a href="/search/cs?searchtype=author&amp;query=Sitaula%2C+C">Chiranjibi Sitaula</a>, <a href="/search/cs?searchtype=author&amp;query=Fattahi%2C+D">Davood Fattahi</a>, <a href="/search/cs?searchtype=author&amp;query=Sameni%2C+R">Reza 
Sameni</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+K">Kenneth Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Lindsay Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Arrabella King</a>, <a href="/search/cs?searchtype=author&amp;query=Ramanathan%2C+A">Ashwin Ramanathan</a>, <a href="/search/cs?searchtype=author&amp;query=Malhotra%2C+A">Atul Malhotra</a>, <a href="/search/cs?searchtype=author&amp;query=Dumont%2C+G+A">Guy A. Dumont</a>, <a href="/search/cs?searchtype=author&amp;query=Marzbanrad%2C+F">Faezeh Marzbanrad</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.03211v1-abstract-short" style="display: inline;"> Stethoscope-recorded chest sounds provide the opportunity for remote cardio-respiratory health monitoring of neonates. However, reliable monitoring requires high-quality heart and lung sounds. This paper presents novel Non-negative Matrix Factorisation (NMF) and Non-negative Matrix Co-Factorisation (NMCF) methods for neonatal chest sound separation. To assess these methods and compare with existin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.03211v1-abstract-full').style.display = 'inline'; document.getElementById('2201.03211v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.03211v1-abstract-full" style="display: none;"> Stethoscope-recorded chest sounds provide the opportunity for remote cardio-respiratory health monitoring of neonates. However, reliable monitoring requires high-quality heart and lung sounds. This paper presents novel Non-negative Matrix Factorisation (NMF) and Non-negative Matrix Co-Factorisation (NMCF) methods for neonatal chest sound separation. 
To assess these methods and compare with existing single-source separation methods, an artificial mixture dataset was generated comprising of heart, lung and noise sounds. Signal-to-noise ratios were then calculated for these artificial mixtures. These methods were also tested on real-world noisy neonatal chest sounds and assessed based on vital sign estimation error and a signal quality score of 1-5 developed in our previous works. Additionally, the computational cost of all methods was assessed to determine the applicability for real-time processing. Overall, both the proposed NMF and NMCF methods outperform the next best existing method by 2.7dB to 11.6dB for the artificial dataset and 0.40 to 1.12 signal quality improvement for the real-world dataset. The median processing time for the sound separation of a 10s recording was found to be 28.3s for NMCF and 342ms for NMF. Because of stable and robust performance, we believe that our proposed methods are useful to denoise neonatal heart and lung sound in a real-world environment. Codes for proposed and existing methods can be found at: https://github.com/egrooby-monash/Heart-and-Lung-Sound-Separation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.03211v1-abstract-full').style.display = 'none'; document.getElementById('2201.03211v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 4 figures, 3 tables. 
Paper submitted and under review for possible publication in IEEE</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Journal of Biomedical and Health Informatics, 2022 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.09333">arXiv:2112.09333</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.09333">pdf</a>, <a href="https://arxiv.org/format/2112.09333">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Deep Bayesian Learning for Car Hacking Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ale%2C+L">Laha Ale</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+S+A">Scott A. King</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+N">Ning Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.09333v1-abstract-short" style="display: inline;"> With the rise of self-drive cars and connected vehicles, cars are equipped with various devices to assistant the drivers or support self-drive systems. Undoubtedly, cars have become more intelligent as we can deploy more and more devices and software on the cars. 
Accordingly, the security of assistant and self-drive systems in the cars becomes a life-threatening issue as smart cars can be invaded&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.09333v1-abstract-full').style.display = 'inline'; document.getElementById('2112.09333v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.09333v1-abstract-full" style="display: none;"> With the rise of self-drive cars and connected vehicles, cars are equipped with various devices to assistant the drivers or support self-drive systems. Undoubtedly, cars have become more intelligent as we can deploy more and more devices and software on the cars. Accordingly, the security of assistant and self-drive systems in the cars becomes a life-threatening issue as smart cars can be invaded by malicious attacks that cause traffic accidents. Currently, canonical machine learning and deep learning methods are extensively employed in car hacking detection. However, machine learning and deep learning methods can easily be overconfident and defeated by carefully designed adversarial examples. Moreover, those methods cannot provide explanations for security engineers for further analysis. In this work, we investigated Deep Bayesian Learning models to detect and analyze car hacking behaviors. The Bayesian learning methods can capture the uncertainty of the data and avoid overconfident issues. Moreover, the Bayesian models can provide more information to support the prediction results that can help security engineers further identify the attacks. We have compared our model with deep learning models and the results show the advantages of our proposed model. 
The code of this work is publicly available <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.09333v1-abstract-full').style.display = 'none'; document.getElementById('2112.09333v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.09328">arXiv:2112.09328</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.09328">pdf</a>, <a href="https://arxiv.org/format/2112.09328">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/JIOT.2022.3166110">10.1109/JIOT.2022.3166110 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> D3PG: Dirichlet DDPG for Task Partitioning and Offloading with Constrained Hybrid Action Space in Mobile Edge Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ale%2C+L">Laha Ale</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+S+A">Scott A. 
King</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+N">Ning Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Sattar%2C+A+R">Abdul Rahman Sattar</a>, <a href="/search/cs?searchtype=author&amp;query=Skandaraniyam%2C+J">Janahan Skandaraniyam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.09328v2-abstract-short" style="display: inline;"> Mobile Edge Computing (MEC) has been regarded as a promising paradigm to reduce service latency for data processing in the Internet of Things, by provisioning computing resources at the network edge. In this work, we jointly optimize the task partitioning and computational power allocation for computation offloading in a dynamic environment with multiple IoT devices and multiple edge servers. We f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.09328v2-abstract-full').style.display = 'inline'; document.getElementById('2112.09328v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.09328v2-abstract-full" style="display: none;"> Mobile Edge Computing (MEC) has been regarded as a promising paradigm to reduce service latency for data processing in the Internet of Things, by provisioning computing resources at the network edge. In this work, we jointly optimize the task partitioning and computational power allocation for computation offloading in a dynamic environment with multiple IoT devices and multiple edge servers. We formulate the problem as a Markov decision process with constrained hybrid action space, which cannot be well handled by existing deep reinforcement learning (DRL) algorithms. 
Therefore, we develop a novel Deep Reinforcement Learning called Dirichlet Deep Deterministic Policy Gradient (D3PG), which is built on Deep Deterministic Policy Gradient (DDPG) to solve the problem. The developed model can learn to solve multi-objective optimization, including maximizing the number of tasks processed before expiration and minimizing the energy cost and service latency. More importantly, D3PG can effectively deal with constrained distribution-continuous hybrid action space, where the distribution variables are for the task partitioning and offloading, while the continuous variables are for computational frequency control. Moreover, the D3PG can address many similar issues in MEC and general reinforcement learning problems. Extensive simulation results show that the proposed D3PG outperforms the state-of-art methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.09328v2-abstract-full').style.display = 'none'; document.getElementById('2112.09328v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.05160">arXiv:2110.05160</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.05160">pdf</a>, <a href="https://arxiv.org/format/2110.05160">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Spoki: Unveiling a New Wave of Scanners through a Reactive Network Telescope </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hiesgen%2C+R">Raphael Hiesgen</a>, <a href="/search/cs?searchtype=author&amp;query=Nawrocki%2C+M">Marcin Nawrocki</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Alistair King</a>, <a href="/search/cs?searchtype=author&amp;query=Dainotti%2C+A">Alberto Dainotti</a>, <a href="/search/cs?searchtype=author&amp;query=Schmidt%2C+T+C">Thomas C. Schmidt</a>, <a href="/search/cs?searchtype=author&amp;query=W%C3%A4hlisch%2C+M">Matthias Wählisch</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.05160v1-abstract-short" style="display: inline;"> Large-scale Internet scans are a common method to identify victims of a specific attack. Stateless scanning like in ZMap has been established as an efficient approach to probing at Internet scale. 
Stateless scans, however, need a second phase to perform the attack, which remains invisible to network telescopes that only capture the first incoming packet and is not observed as a related event by ho&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.05160v1-abstract-full').style.display = 'inline'; document.getElementById('2110.05160v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.05160v1-abstract-full" style="display: none;"> Large-scale Internet scans are a common method to identify victims of a specific attack. Stateless scanning like in ZMap has been established as an efficient approach to probing at Internet scale. Stateless scans, however, need a second phase to perform the attack, which remains invisible to network telescopes that only capture the first incoming packet and is not observed as a related event by honeypots. In this work, we examine Internet-wide scan traffic through Spoki, a reactive network telescope operating in real-time that we design and implement. Spoki responds to asynchronous TCP SYN packets and engages in TCP handshakes initiated in the second phase of two-phase scans. Because it is extremely lightweight it scales to large prefixes where it has the unique opportunity to record the first data sequence submitted within the TCP handshake ACK. We analyze two-phase scanners during a three months period using globally deployed Spoki reactive telescopes as well as flow data sets from IXPs and ISPs. We find that a predominant fraction of TCP SYNs on the Internet has irregular characteristics. Our findings also provide a clear signature of today&#39;s scans as: (i) highly targeted, (ii) scanning activities notably vary between regional vantage points, and (iii) a significant share originates from malicious sources. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.05160v1-abstract-full').style.display = 'none'; document.getElementById('2110.05160v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Proc. of 31st USENIX Security Symposium, camera-ready</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.15127">arXiv:2109.15127</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.15127">pdf</a>, <a href="https://arxiv.org/format/2109.15127">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ACCESS.2022.3144355">10.1109/ACCESS.2022.3144355 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Real-Time Multi-Level Neonatal Heart and Lung Sound Quality Assessment for Telehealth Applications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Grooby%2C+E">Ethan Grooby</a>, <a href="/search/cs?searchtype=author&amp;query=Sitaula%2C+C">Chiranjibi Sitaula</a>, <a href="/search/cs?searchtype=author&amp;query=Fattahi%2C+D">Davood Fattahi</a>, <a href="/search/cs?searchtype=author&amp;query=Sameni%2C+R">Reza Sameni</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+K">Kenneth Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Lindsay Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Arrabella King</a>, <a href="/search/cs?searchtype=author&amp;query=Ramanathan%2C+A">Ashwin Ramanathan</a>, <a href="/search/cs?searchtype=author&amp;query=Malhotra%2C+A">Atul Malhotra</a>, <a href="/search/cs?searchtype=author&amp;query=Dumont%2C+G+A">Guy A. Dumont</a>, <a href="/search/cs?searchtype=author&amp;query=Marzbanrad%2C+F">Faezeh Marzbanrad</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.15127v1-abstract-short" style="display: inline;"> Digital stethoscopes in combination with telehealth allow chest sounds to be easily collected and transmitted for remote monitoring and diagnosis. Chest sounds contain important information about a newborn&#39;s cardio-respiratory health. However, low-quality recordings complicate the remote monitoring and diagnosis. 
In this study, a new method is proposed to objectively and automatically assess heart&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.15127v1-abstract-full').style.display = 'inline'; document.getElementById('2109.15127v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.15127v1-abstract-full" style="display: none;"> Digital stethoscopes in combination with telehealth allow chest sounds to be easily collected and transmitted for remote monitoring and diagnosis. Chest sounds contain important information about a newborn&#39;s cardio-respiratory health. However, low-quality recordings complicate the remote monitoring and diagnosis. In this study, a new method is proposed to objectively and automatically assess heart and lung signal quality on a 5-level scale in real-time and to assess the effect of signal quality on vital sign estimation. For the evaluation, a total of 207 10s long chest sounds were taken from 119 preterm and full-term babies. Thirty of the recordings from ten subjects were obtained with synchronous vital signs from the Neonatal Intensive Care Unit (NICU) based on electrocardiogram recordings. As reference, seven annotators independently assessed the signal quality. For automatic quality classification, 400 features were extracted from the chest sounds. After feature selection using minimum redundancy and maximum relevancy algorithm, class balancing, and hyper-parameter optimization, a variety of multi-class and ordinal classification and regression algorithms were trained. Then, heart rate and breathing rate were automatically estimated from the chest sounds using adapted pre-existing methods. The results of subject-wise leave-one-out cross-validation show that the best-performing models had a mean squared error (MSE) of 0.49 and 0.61, and balanced accuracy of 57% and 51% for heart and lung qualities, respectively. 
The best-performing models for real-time analysis (&lt;200ms) had MSE of 0.459 and 0.67, and balanced accuracy of 57% and 46%, respectively. Our experimental results underscore that increasing the signal quality leads to a reduction in vital sign error, with only high-quality recordings having a mean absolute error of less than 5 beats per minute, as required for clinical usage. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.15127v1-abstract-full').style.display = 'none'; document.getElementById('2109.15127v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 8 figures, 3 tables. 
Paper submitted and under review in IEEE Access</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Access, 2022 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.13230">arXiv:2109.13230</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.13230">pdf</a>, <a href="https://arxiv.org/ps/2109.13230">ps</a>, <a href="https://arxiv.org/format/2109.13230">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> The Impact of Domain Shift on Left and Right Ventricle Segmentation in Short Axis Cardiac MR Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ugurlu%2C+D">Devran Ugurlu</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Anton%2C+E">Esther Puyol-Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Young%2C+A">Alistair Young</a>, <a href="/search/cs?searchtype=author&amp;query=Machado%2C+I">Ines Machado</a>, <a href="/search/cs?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. 
Schnabel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.13230v1-abstract-short" style="display: inline;"> Domain shift refers to the difference in the data distribution of two datasets, normally between the training set and the test set for machine learning algorithms. Domain shift is a serious problem for generalization of machine learning models and it is well-established that a domain shift between the training and test sets may cause a drastic drop in the model&#39;s performance. In medical imaging, t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.13230v1-abstract-full').style.display = 'inline'; document.getElementById('2109.13230v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.13230v1-abstract-full" style="display: none;"> Domain shift refers to the difference in the data distribution of two datasets, normally between the training set and the test set for machine learning algorithms. Domain shift is a serious problem for generalization of machine learning models and it is well-established that a domain shift between the training and test sets may cause a drastic drop in the model&#39;s performance. In medical imaging, there can be many sources of domain shift such as different scanners or scan protocols, different pathologies in the patient population, anatomical differences in the patient population (e.g. men vs women) etc. Therefore, in order to train models that have good generalization performance, it is important to be aware of the domain shift problem, its potential causes and to devise ways to address it. In this paper, we study the effect of domain shift on left and right ventricle blood pool segmentation in short axis cardiac MR images. 
Our dataset contains short axis images from 4 different MR scanners and 3 different pathology groups. The training is performed with nnUNet. The results show that scanner differences cause a greater drop in performance compared to changing the pathology group, and that the impact of domain shift is greater on right ventricle segmentation compared to left ventricle segmentation. Increasing the number of training subjects increased cross-scanner performance more than in-scanner performance at small training set sizes, but this difference in improvement decreased with larger training set sizes. Training models using data from multiple scanners improved cross-domain performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.13230v1-abstract-full').style.display = 'none'; document.getElementById('2109.13230v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to STACOM 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.10641">arXiv:2109.10641</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.10641">pdf</a>, <a href="https://arxiv.org/format/2109.10641">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Uncertainty-Aware Training for Cardiac Resynchronisation Therapy Response Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dawood%2C+T">Tareen Dawood</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Andlauer%2C+R">Robin Andlauer</a>, <a href="/search/cs?searchtype=author&amp;query=Sidhu%2C+B+S">Baldeep S. Sidhu</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Gould%2C+J">Justin Gould</a>, <a href="/search/cs?searchtype=author&amp;query=Porter%2C+B">Bradley Porter</a>, <a href="/search/cs?searchtype=author&amp;query=Elliott%2C+M">Mark Elliott</a>, <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+V">Vishal Mehta</a>, <a href="/search/cs?searchtype=author&amp;query=Rinaldi%2C+C+A">C. 
Aldo Rinaldi</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.10641v1-abstract-short" style="display: inline;"> Evaluation of predictive deep learning (DL) models beyond conventional performance metrics has become increasingly important for applications in sensitive environments like healthcare. Such models might have the capability to encode and analyse large sets of data but they often lack comprehensive interpretability methods, preventing clinical trust in predictive outcomes. Quantifying uncertainty of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.10641v1-abstract-full').style.display = 'inline'; document.getElementById('2109.10641v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.10641v1-abstract-full" style="display: none;"> Evaluation of predictive deep learning (DL) models beyond conventional performance metrics has become increasingly important for applications in sensitive environments like healthcare. Such models might have the capability to encode and analyse large sets of data but they often lack comprehensive interpretability methods, preventing clinical trust in predictive outcomes. Quantifying uncertainty of a prediction is one way to provide such interpretability and promote trust. However, relatively little attention has been paid to how to include such requirements into the training of the model. 
In this paper we: (i) quantify the data (aleatoric) and model (epistemic) uncertainty of a DL model for Cardiac Resynchronisation Therapy response prediction from cardiac magnetic resonance images, and (ii) propose and perform a preliminary investigation of an uncertainty-aware loss function that can be used to retrain an existing DL image-based classification model to encourage confidence in correct predictions and reduce confidence in incorrect predictions. Our initial results are promising, showing a significant increase in the (epistemic) confidence of true positive predictions, with some evidence of a reduction in false negative confidence. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.10641v1-abstract-full').style.display = 'none'; document.getElementById('2109.10641v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">STACOM 2021 Workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.09421">arXiv:2109.09421</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.09421">pdf</a>, <a href="https://arxiv.org/format/2109.09421">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Improved AI-based segmentation of apical and basal slices from clinical cine CMR </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mariscal-Harana%2C+J">Jorge Mariscal-Harana</a>, <a href="/search/cs?searchtype=author&amp;query=Kifle%2C+N">Naomi Kifle</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.09421v1-abstract-short" style="display: inline;"> Current artificial intelligence (AI) algorithms for short-axis cardiac magnetic resonance (CMR) segmentation achieve human performance for slices situated in the middle of the heart. However, an often-overlooked fact is that segmentation of the basal and apical slices is more difficult. 
During manual analysis, differences in the basal segmentations have been reported as one of the major sources of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.09421v1-abstract-full').style.display = 'inline'; document.getElementById('2109.09421v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.09421v1-abstract-full" style="display: none;"> Current artificial intelligence (AI) algorithms for short-axis cardiac magnetic resonance (CMR) segmentation achieve human performance for slices situated in the middle of the heart. However, an often-overlooked fact is that segmentation of the basal and apical slices is more difficult. During manual analysis, differences in the basal segmentations have been reported as one of the major sources of disagreement in human interobserver variability. In this work, we aim to investigate the performance of AI algorithms in segmenting basal and apical slices and design strategies to improve their segmentation. We trained all our models on a large dataset of clinical CMR studies obtained from two NHS hospitals (n=4,228) and evaluated them against two external datasets: ACDC (n=100) and M&amp;Ms (n=321). Using manual segmentations as a reference, CMR slices were assigned to one of four regions: non-cardiac, base, middle, and apex. Using the nnU-Net framework as a baseline, we investigated two different approaches to reduce the segmentation performance gap between cardiac regions: (1) non-uniform batch sampling, which allows us to choose how often images from different regions are seen during training; and (2) a cardiac-region classification model followed by three (i.e. base, middle, and apex) region-specific segmentation models. We show that the classification and segmentation approach was best at reducing the performance gap across all datasets. 
We also show that improvements in the classification performance can subsequently lead to a significantly better performance in the segmentation task. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.09421v1-abstract-full').style.display = 'none'; document.getElementById('2109.09421v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">*Shared last authors</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.07955">arXiv:2109.07955</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.07955">pdf</a>, <a href="https://arxiv.org/format/2109.07955">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Quality-aware Cine Cardiac MRI Reconstruction and Analysis from Undersampled k-space Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Machado%2C+I">Ines Machado</a>, <a href="/search/cs?searchtype=author&amp;query=Puyol-Anton%2C+E">Esther Puyol-Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a 
href="/search/cs?searchtype=author&amp;query=Cruz%2C+G">Gastao Cruz</a>, <a href="/search/cs?searchtype=author&amp;query=Ugurlu%2C+D">Devran Ugurlu</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Castelo-Branco%2C+M">Miguel Castelo-Branco</a>, <a href="/search/cs?searchtype=author&amp;query=Young%2C+A">Alistair Young</a>, <a href="/search/cs?searchtype=author&amp;query=Prieto%2C+C">Claudia Prieto</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.07955v1-abstract-short" style="display: inline;"> Cine cardiac MRI is routinely acquired for the assessment of cardiac health, but the imaging process is slow and typically requires several breath-holds to acquire sufficient k-space profiles to ensure good image quality. Several undersampling-based reconstruction techniques have been proposed during the last decades to speed up cine cardiac MRI acquisition. However, the undersampling factor is co&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.07955v1-abstract-full').style.display = 'inline'; document.getElementById('2109.07955v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.07955v1-abstract-full" style="display: none;"> Cine cardiac MRI is routinely acquired for the assessment of cardiac health, but the imaging process is slow and typically requires several breath-holds to acquire sufficient k-space profiles to ensure good image quality. 
Several undersampling-based reconstruction techniques have been proposed during the last decades to speed up cine cardiac MRI acquisition. However, the undersampling factor is commonly fixed to conservative values before acquisition to ensure diagnostic image quality, potentially leading to unnecessarily long scan times. In this paper, we propose an end-to-end quality-aware cine short-axis cardiac MRI framework that combines image acquisition and reconstruction with downstream tasks such as segmentation, volume curve analysis and estimation of cardiac functional parameters. The goal is to reduce scan time by acquiring only a fraction of k-space data to enable the reconstruction of images that can pass quality control checks and produce reliable estimates of cardiac functional parameters. The framework consists of a deep learning model for the reconstruction of 2D+t cardiac cine MRI images from undersampled data, an image quality-control step to detect good quality reconstructions, followed by a deep learning model for bi-ventricular segmentation, a quality-control step to detect good quality segmentations and automated calculation of cardiac functional parameters. To demonstrate the feasibility of the proposed approach, we perform simulations using a cohort of selected participants from the UK Biobank (n=270), 200 healthy subjects and 70 patients with cardiomyopathies. Our results show that we can produce quality-controlled images in a scan time reduced from 12 to 4 seconds per slice, enabling reliable estimates of cardiac functional parameters such as ejection fraction within 5% mean absolute error. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.07955v1-abstract-full').style.display = 'none'; document.getElementById('2109.07955v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.03275">arXiv:2109.03275</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.03275">pdf</a>, <a href="https://arxiv.org/format/2109.03275">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/EMBC46164.2021.9630256">10.1109/EMBC46164.2021.9630256 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A New Non-Negative Matrix Co-Factorisation Approach for Noisy Neonatal Chest Sound Separation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Grooby%2C+E">Ethan Grooby</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+J">Jinyuan He</a>, <a 
href="/search/cs?searchtype=author&amp;query=Fattahi%2C+D">Davood Fattahi</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Lindsay Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Arrabella King</a>, <a href="/search/cs?searchtype=author&amp;query=Ramanathan%2C+A">Ashwin Ramanathan</a>, <a href="/search/cs?searchtype=author&amp;query=Malhotra%2C+A">Atul Malhotra</a>, <a href="/search/cs?searchtype=author&amp;query=Dumont%2C+G+A">Guy A. Dumont</a>, <a href="/search/cs?searchtype=author&amp;query=Marzbanrad%2C+F">Faezeh Marzbanrad</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.03275v1-abstract-short" style="display: inline;"> Obtaining high-quality heart and lung sounds enables clinicians to accurately assess a newborn&#39;s cardio-respiratory health and provide timely care. However, noisy chest sound recordings are common, hindering timely and accurate assessment. A new Non-negative Matrix Co-Factorisation-based approach is proposed to separate noisy chest sound recordings into heart, lung, and noise components to address&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.03275v1-abstract-full').style.display = 'inline'; document.getElementById('2109.03275v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.03275v1-abstract-full" style="display: none;"> Obtaining high-quality heart and lung sounds enables clinicians to accurately assess a newborn&#39;s cardio-respiratory health and provide timely care. However, noisy chest sound recordings are common, hindering timely and accurate assessment. A new Non-negative Matrix Co-Factorisation-based approach is proposed to separate noisy chest sound recordings into heart, lung, and noise components to address this problem. 
This method is achieved through training with 20 high-quality heart and lung sounds, in parallel with separating the sounds of the noisy recording. The method was tested on 68 10-second noisy recordings containing both heart and lung sounds and compared to the current state of the art Non-negative Matrix Factorisation methods. Results show significant improvements in heart and lung sound quality scores respectively, and improved accuracy of 3.6bpm and 1.2bpm in heart and breathing rate estimation respectively, when compared to existing methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.03275v1-abstract-full').style.display = 'none'; document.getElementById('2109.03275v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 2 figures. 
To appear as conference paper at 43rd Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 1st-5th November 2021</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 2021 43rd Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.12689">arXiv:2107.12689</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.12689">pdf</a>, <a href="https://arxiv.org/format/2107.12689">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TMI.2022.3203309">10.1109/TMI.2022.3203309 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A persistent homology-based topological loss for CNN-based multi-class segmentation of CMR </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Byrne%2C+N">Nick Byrne</a>, <a href="/search/cs?searchtype=author&amp;query=Clough%2C+J+R">James R Clough</a>, <a href="/search/cs?searchtype=author&amp;query=Valverde%2C+I">Isra Valverde</a>, <a href="/search/cs?searchtype=author&amp;query=Montana%2C+G">Giovanni Montana</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.12689v2-abstract-short" style="display: inline;"> Multi-class segmentation of cardiac magnetic resonance (CMR) images seeks a separation of data into anatomical components with known structure and configuration. The most popular CNN-based methods are optimised using pixel wise loss functions, ignorant of the spatially extended features that characterise anatomy. Therefore, whilst sharing a high spatial overlap with the ground truth, inferred CNN-&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.12689v2-abstract-full').style.display = 'inline'; document.getElementById('2107.12689v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.12689v2-abstract-full" style="display: none;"> Multi-class segmentation of cardiac magnetic resonance (CMR) images seeks a separation of data into anatomical components with known structure and configuration. The most popular CNN-based methods are optimised using pixel wise loss functions, ignorant of the spatially extended features that characterise anatomy. Therefore, whilst sharing a high spatial overlap with the ground truth, inferred CNN-based segmentations can lack coherence, including spurious connected components, holes and voids. Such results are implausible, violating anticipated anatomical topology. In response, (single-class) persistent homology-based loss functions have been proposed to capture global anatomical features. Our work extends these approaches to the task of multi-class segmentation. Building an enriched topological description of all class labels and class label pairs, our loss functions make predictable and statistically significant improvements in segmentation topology using a CNN-based post-processing framework. 
We also present (and make available) a highly efficient implementation based on cubical complexes and parallel execution, enabling practical application within high resolution 3D data for the first time. We demonstrate our approach on 2D short axis and 3D whole heart CMR segmentation, advancing a detailed and faithful analysis of performance on two publicly available datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.12689v2-abstract-full').style.display = 'none'; document.getElementById('2107.12689v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Version accepted for publication in IEEE Transactions on Medical Imaging</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.05515">arXiv:2107.05515</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.05515">pdf</a>, <a href="https://arxiv.org/format/2107.05515">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Mathematical Analysis of Redistricting in Utah </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=King%2C+A">Annika King</a>, <a href="/search/cs?searchtype=author&amp;query=Murri%2C+J">Jacob Murri</a>, <a href="/search/cs?searchtype=author&amp;query=Callahan%2C+J">Jake 
Callahan</a>, <a href="/search/cs?searchtype=author&amp;query=Russell%2C+A">Adrienne Russell</a>, <a href="/search/cs?searchtype=author&amp;query=Jarvis%2C+T+J">Tyler J. Jarvis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.05515v3-abstract-short" style="display: inline;"> We discuss difficulties of evaluating partisan gerrymandering in the congressional districts in Utah and the failure of many common metrics in Utah. We explain why the Republican vote share in the least-Republican district (LRVS) is a good indicator of the advantage or disadvantage each party has in the Utah congressional districts. Although the LRVS only makes sense in settings with at most one c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.05515v3-abstract-full').style.display = 'inline'; document.getElementById('2107.05515v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.05515v3-abstract-full" style="display: none;"> We discuss difficulties of evaluating partisan gerrymandering in the congressional districts in Utah and the failure of many common metrics in Utah. We explain why the Republican vote share in the least-Republican district (LRVS) is a good indicator of the advantage or disadvantage each party has in the Utah congressional districts. Although the LRVS only makes sense in settings with at most one competitive district, in that setting it directly captures the extent to which a given redistricting plan gives advantage or disadvantage to the Republican and Democratic parties. We use the LRVS to evaluate the most common measures of partisan gerrymandering in the context of Utah&#39;s 2011 congressional districts. 
We do this by generating large ensembles of alternative redistricting plans using Markov chain Monte Carlo methods. We also discuss the implications of this new metric and our results on the question of whether the 2011 Utah congressional plan was gerrymandered. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.05515v3-abstract-full').style.display = 'none'; document.getElementById('2107.05515v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 pages, 6 figures, submitted to &#34;Statistics and Public Policy&#34;</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> K.4.1 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.12387">arXiv:2106.12387</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.12387">pdf</a>, <a href="https://arxiv.org/format/2106.12387">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Fairness in Cardiac MR Image Analysis: An Investigation of Bias Due to Data Imbalance in Deep Learning Based Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Puyol-Anton%2C+E">Esther Puyol-Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Ruijsink%2C+B">Bram Ruijsink</a>, <a href="/search/cs?searchtype=author&amp;query=Piechnik%2C+S+K">Stefan K. Piechnik</a>, <a href="/search/cs?searchtype=author&amp;query=Neubauer%2C+S">Stefan Neubauer</a>, <a href="/search/cs?searchtype=author&amp;query=Petersen%2C+S+E">Steffen E. Petersen</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.12387v2-abstract-short" style="display: inline;"> The subject of &#34;fairness&#34; in artificial intelligence (AI) refers to assessing AI algorithms for potential bias based on demographic characteristics such as race and gender, and the development of algorithms to address this bias. Most applications to date have been in computer vision, although some work in healthcare has started to emerge. The use of deep learning (DL) in cardiac MR segmentation ha&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.12387v2-abstract-full').style.display = 'inline'; document.getElementById('2106.12387v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.12387v2-abstract-full" style="display: none;"> The subject of &#34;fairness&#34; in artificial intelligence (AI) refers to assessing AI algorithms for potential bias based on demographic characteristics such as race and gender, and the development of algorithms to address this bias. Most applications to date have been in computer vision, although some work in healthcare has started to emerge. 
The use of deep learning (DL) in cardiac MR segmentation has led to impressive results in recent years, and such techniques are starting to be translated into clinical practice. However, no work has yet investigated the fairness of such models. In this work, we perform such an analysis for racial/gender groups, focusing on the problem of training data imbalance, using a nnU-Net model trained and evaluated on cine short axis cardiac MR data from the UK Biobank dataset, consisting of 5,903 subjects from 6 different racial groups. We find statistically significant differences in Dice performance between different racial groups. To reduce the racial bias, we investigated three strategies: (1) stratified batch sampling, in which batch sampling is stratified to ensure balance between racial groups; (2) fair meta-learning for segmentation, in which a DL classifier is trained to classify race and jointly optimized with the segmentation model; and (3) protected group models, in which a different segmentation model is trained for each racial group. We also compared the results to the scenario where we have a perfectly balanced database. To assess fairness we used the standard deviation (SD) and skewed error ratio (SER) of the average Dice values. Our results demonstrate that the racial bias results from the use of imbalanced training data, and that all proposed bias mitigation strategies improved fairness, with the best SD and SER resulting from the use of protected group models. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.12387v2-abstract-full').style.display = 'none'; document.getElementById('2106.12387v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">MICCAI 2021 conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.15106">arXiv:2104.15106</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2104.15106">pdf</a>, <a href="https://arxiv.org/format/2104.15106">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Latent Factor Decomposition Model: Applications for Questionnaire Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=McLaughlin%2C+C+J">Connor J. McLaughlin</a>, <a href="/search/cs?searchtype=author&amp;query=Kokkotou%2C+E+G">Efi G. Kokkotou</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+J+A">Jean A. King</a>, <a href="/search/cs?searchtype=author&amp;query=Conboy%2C+L+A">Lisa A. 
Conboy</a>, <a href="/search/cs?searchtype=author&amp;query=Yousefi%2C+A">Ali Yousefi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2104.15106v3-abstract-short" style="display: inline;"> The analysis of clinical questionnaire data comes with many inherent challenges. These challenges include the handling of data with missing fields, as well as the overall interpretation of a dataset with many fields of different scales and forms. While numerous methods have been developed to address these challenges, they are often not robust, statistically sound, or easily interpretable. Here, we&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.15106v3-abstract-full').style.display = 'inline'; document.getElementById('2104.15106v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2104.15106v3-abstract-full" style="display: none;"> The analysis of clinical questionnaire data comes with many inherent challenges. These challenges include the handling of data with missing fields, as well as the overall interpretation of a dataset with many fields of different scales and forms. While numerous methods have been developed to address these challenges, they are often not robust, statistically sound, or easily interpretable. Here, we propose a latent factor modeling framework that extends the principal component analysis for both categorical and quantitative data with missing elements. The model simultaneously provides the principal components (basis) and each patients&#39; projections on these bases in a latent space. We show an application of our modeling framework through Irritable Bowel Syndrome (IBS) symptoms, where we find correlations between these projections and other standardized patient symptom scales. 
This latent factor model can be easily applied to different clinical questionnaire datasets for clustering analysis and interpretable inference. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.15106v3-abstract-full').style.display = 'none'; document.getElementById('2104.15106v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for the 43rd IEEE Annual International Conference of the IEEE Engineering in Medicine and Biology Society, EMBC 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.07814">arXiv:2103.07814</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2103.07814">pdf</a>, <a href="https://arxiv.org/format/2103.07814">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3448613">10.1145/3448613 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Spatio-Temporal Bayesian Learning for Mobile Edge Computing Resource Planning in Smart Cities </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Ale%2C+L">Laha Ale</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+N">Ning Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+S+A">Scott A. King</a>, <a href="/search/cs?searchtype=author&amp;query=Guardiola%2C+J">Jose Guardiola</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.07814v1-abstract-short" style="display: inline;"> A smart city improves operational efficiency and comfort of living by harnessing techniques such as the Internet of Things (IoT) to collect and process data for decision making. To better support smart cities, data collected by IoT should be stored and processed appropriately. However, IoT devices are often task-specialized and resource-constrained, and thus, they heavily rely on online resources&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.07814v1-abstract-full').style.display = 'inline'; document.getElementById('2103.07814v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.07814v1-abstract-full" style="display: none;"> A smart city improves operational efficiency and comfort of living by harnessing techniques such as the Internet of Things (IoT) to collect and process data for decision making. To better support smart cities, data collected by IoT should be stored and processed appropriately. However, IoT devices are often task-specialized and resource-constrained, and thus, they heavily rely on online resources in terms of computing and storage to accomplish various tasks. Moreover, these cloud-based solutions often centralize the resources and are far away from the end IoTs and cannot respond to users in time due to network congestion when massive numbers of tasks offload through the core network. 
Therefore, by decentralizing resources spatially close to IoT devices, mobile edge computing (MEC) can reduce latency and improve service quality for a smart city, where service requests can be fulfilled in proximity. As the service demands exhibit spatial-temporal features, deploying MEC servers at optimal locations and allocating MEC resources play an essential role in efficiently meeting service requirements in a smart city. In this regard, it is essential to learn the distribution of resource demands in time and space. In this work, we first propose a spatio-temporal Bayesian hierarchical learning approach to learn and predict the distribution of MEC resource demand over space and time to facilitate MEC deployment and resource management. Second, the proposed model is trained and tested on real-world data, and the results demonstrate that the proposed method can achieve very high accuracy. Third, we demonstrate an application of the proposed method by simulating task offloading. Finally, the simulated results show that resources allocated based upon our models&#39; predictions are exploited more efficiently than the resources are equally divided into all servers in unobserved areas. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.07814v1-abstract-full').style.display = 'none'; document.getElementById('2103.07814v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.01241">arXiv:2012.01241</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.01241">pdf</a>, <a href="https://arxiv.org/format/2012.01241">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Channel Attention Networks for Robust MR Fingerprinting Matching </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Soyak%2C+R">Refik Soyak</a>, <a href="/search/cs?searchtype=author&amp;query=Navruz%2C+E">Ebru Navruz</a>, <a href="/search/cs?searchtype=author&amp;query=Ersoy%2C+E+O">Eda Ozgu Ersoy</a>, <a href="/search/cs?searchtype=author&amp;query=Cruz%2C+G">Gastao Cruz</a>, <a href="/search/cs?searchtype=author&amp;query=Prieto%2C+C">Claudia Prieto</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a>, <a href="/search/cs?searchtype=author&amp;query=Unay%2C+D">Devrim Unay</a>, <a href="/search/cs?searchtype=author&amp;query=Oksuz%2C+I">Ilkay Oksuz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.01241v1-abstract-short" style="display: inline;"> Magnetic Resonance Fingerprinting (MRF) enables simultaneous mapping of multiple tissue parameters such as T1 and T2 relaxation times. The working principle of MRF relies on varying acquisition parameters pseudo-randomly, so that each tissue generates its unique signal evolution during scanning. 
Even though MRF provides faster scanning, it has disadvantages such as erroneous and slow generation of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.01241v1-abstract-full').style.display = 'inline'; document.getElementById('2012.01241v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.01241v1-abstract-full" style="display: none;"> Magnetic Resonance Fingerprinting (MRF) enables simultaneous mapping of multiple tissue parameters such as T1 and T2 relaxation times. The working principle of MRF relies on varying acquisition parameters pseudo-randomly, so that each tissue generates its unique signal evolution during scanning. Even though MRF provides faster scanning, it has disadvantages such as erroneous and slow generation of the corresponding parametric maps, which needs to be improved. Moreover, there is a need for explainable architectures for understanding the guiding signals to generate accurate parametric maps. In this paper, we addressed both of these shortcomings by proposing a novel neural network architecture consisting of a channel-wise attention module and a fully convolutional network. The proposed approach, evaluated over 3 simulated MRF signals, reduces error in the reconstruction of tissue parameters by 8.88% for T1 and 75.44% for T2 with respect to state-of-the-art methods. Another contribution of this study is a new channel selection method: attention-based channel selection. Furthermore, the effect of patch size and temporal frames of MRF signal on channel reduction are analyzed by employing a channel-wise attention. 
 <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.01241v1-abstract-full').style.display = 'none'; document.getElementById('2012.01241v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=King%2C+A&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=King%2C+A&amp;start=0" class="pagination-link is-current" aria-label="Page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=King%2C+A&amp;start=50" class="pagination-link" aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=King%2C+A&amp;start=100" class="pagination-link" aria-label="Page 3">3 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact 
arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 
24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 
47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>
<!-- Stray scrape residue after the document end, preserved as a comment so it does not render: Pages: 1 2 3 4 5 6 7 8 9 10 -->