
Search | arXiv e-print repository

Showing 1–26 of 26 results for author: Angelini, E

Searching in archive cs. Results sorted by announcement date (newest first), 50 per page.

1. arXiv:2501.08902 [pdf]  eess.IV, cs.CV, cs.LG
Multi-View Transformers for Airway-To-Lung Ratio Inference on Cardiac CT Scans: The C4R Study
Authors: Sneha N. Naik, Elsa D. Angelini, Eric A. Hoffman, Elizabeth C. Oelsner, R. Graham Barr, Benjamin M. Smith, Andrew F. Laine
Abstract: The ratio of airway tree lumen to lung size (ALR), assessed at full inspiration on high-resolution full-lung computed tomography (CT), is a major risk factor for chronic obstructive pulmonary disease (COPD). There is growing interest in inferring ALR from cardiac CT images, which are widely available in epidemiological cohorts, to investigate the relationship of ALR to severe COVID-19 and post-acute sequelae of SARS-CoV-2 infection (PASC). Previously, cardiac scans included approximately 2/3 of the total lung volume with 5-6x greater slice thickness than high-resolution (HR) full-lung (FL) CT. In this study, we present a novel attention-based multi-view Swin Transformer to infer FL ALR values from segmented cardiac CT scans. For supervised training, we exploit paired full-lung and cardiac CTs acquired in the Multi-Ethnic Study of Atherosclerosis (MESA). Our network significantly outperforms a proxy direct ALR inference on segmented cardiac CT scans and achieves accuracy and reproducibility comparable to the scan-rescan reproducibility of the FL ALR ground truth.
Submitted 15 January, 2025; originally announced January 2025.
Comments: Accepted to appear in Proceedings of the International Symposium on Biomedical Imaging (ISBI), 2025
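As a rough illustration of the multi-view idea above, the sketch below embeds each view of a scan with a shared encoder, fuses the view tokens with self-attention, and regresses a single scalar. The encoder, dimensions, and names are placeholder assumptions, not the paper's Swin-based architecture.

```python
import torch
import torch.nn as nn

class MultiViewALRRegressor(nn.Module):
    """Minimal sketch of attention-based multi-view regression: a shared
    encoder embeds each 2-D view, self-attention fuses the view tokens,
    and a head regresses a scalar (here, an ALR value). Illustrative
    stand-in only, not the paper's model."""
    def __init__(self, view_encoder, dim=256, heads=4):
        super().__init__()
        self.encoder = view_encoder        # maps (B, C, H, W) -> (B, dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.head = nn.Linear(dim, 1)

    def forward(self, views):              # views: (B, V, C, H, W)
        b, v = views.shape[:2]
        tokens = self.encoder(views.flatten(0, 1)).view(b, v, -1)
        fused, _ = self.attn(tokens, tokens, tokens)      # cross-view attention
        return self.head(fused.mean(dim=1)).squeeze(-1)   # one value per scan
```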
2. arXiv:2411.09462 [pdf, other]  cs.CV
SINETRA: a Versatile Framework for Evaluating Single Neuron Tracking in Behaving Animals
Authors: Raphael Reme, Alasdair Newson, Elsa Angelini, Jean-Christophe Olivo-Marin, Thibault Lagache
Abstract: Accurately tracking neuronal activity in behaving animals presents significant challenges due to complex motions and background noise, and the lack of annotated datasets limits the evaluation and improvement of such tracking algorithms. To address this, we developed SINETRA, a versatile simulator that generates synthetic tracking data for particles on a deformable background, closely mimicking live animal recordings. This simulator produces annotated 2D and 3D videos that reflect the intricate movements seen in behaving animals like Hydra vulgaris. We evaluated four state-of-the-art tracking algorithms, highlighting the current limitations of these methods in challenging scenarios and paving the way for improved cell tracking techniques in dynamic biological systems.
Submitted 15 November, 2024; v1 submitted 14 November, 2024; originally announced November 2024.
Comments: 5 pages, 3 figures, submitted to the 2025 IEEE International Symposium on Biomedical Imaging (ISBI)

3. arXiv:2411.05779 [pdf, other]  cs.CV, cs.LG
Curriculum Learning for Few-Shot Domain Adaptation in CT-based Airway Tree Segmentation
Authors: Maxime Jacovella, Ali Keshavarzi, Elsa Angelini
Abstract: Despite advances with deep learning (DL), automated airway segmentation from chest CT scans continues to face challenges in segmentation quality and generalization across cohorts. To address these, we propose integrating curriculum learning (CL) into airway segmentation networks, distributing the training set into batches according to ad-hoc complexity scores derived from CT scans and corresponding ground-truth tree features. We specifically investigate few-shot domain adaptation, targeting scenarios where manual annotation of a full fine-tuning dataset is prohibitively expensive. Results are reported on two large open cohorts (ATM22 and AIIB23), with high performance using CL for both full training (source domain) and few-shot fine-tuning (target domain), along with insights on the potential detrimental effects of using a classic bootstrapping scoring function or improper scan sequencing.
Submitted 8 November, 2024; originally announced November 2024.
Comments: Under review for the 22nd IEEE International Symposium on Biomedical Imaging (ISBI), Houston, TX, USA
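The batching scheme described above, releasing training scans from easy to hard according to a precomputed complexity score, might look like the following sketch; the scoring input and the stage schedule are assumptions for illustration, not the paper's implementation.

```python
import torch
from torch.utils.data import DataLoader, Subset

def curriculum_loaders(dataset, complexity_scores, batch_size, n_stages=4):
    """Yield DataLoaders over progressively harder subsets of the data.

    complexity_scores: hypothetical per-scan difficulty values (e.g.,
    derived from airway-tree features); higher = harder. Each stage
    unlocks the next tranche of harder scans while keeping easier ones,
    a common CL schedule."""
    order = sorted(range(len(dataset)), key=lambda i: complexity_scores[i])
    stage_size = max(1, len(order) // n_stages)
    for stage in range(1, n_stages + 1):
        visible = order if stage == n_stages else order[: stage * stage_size]
        yield DataLoader(Subset(dataset, visible),
                         batch_size=batch_size, shuffle=True)
```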
4. arXiv:2407.20395 [pdf, other]  cs.CV, cs.AI, cs.LG
Dense Self-Supervised Learning for Medical Image Segmentation
Authors: Maxime Seince, Loic Le Folgoc, Luiz Augusto Facury de Souza, Elsa Angelini
Abstract: Deep learning has revolutionized medical image segmentation, but it relies heavily on high-quality annotations. The time, cost, and expertise required to label images at the pixel level for each new task have slowed widespread adoption of the paradigm. We propose Pix2Rep, a self-supervised learning (SSL) approach for few-shot segmentation that reduces the manual annotation burden by learning powerful pixel-level representations directly from unlabeled images. Pix2Rep is a novel pixel-level loss and pre-training paradigm for contrastive SSL on whole images, applied to generic encoder-decoder deep learning backbones (e.g., U-Net). Whereas most SSL methods enforce invariance of the learned image-level representations under intensity and spatial image augmentations, Pix2Rep enforces equivariance of the pixel-level representations. We demonstrate the framework on a task of cardiac MRI segmentation. Results show improved performance compared to existing semi- and self-supervised approaches, and a 5-fold reduction in the annotation burden for equivalent performance versus a fully supervised U-Net baseline. This includes a 30% (resp. 31%) Dice improvement for one-shot segmentation under linear probing (resp. fine-tuning). Finally, we integrate the novel Pix2Rep concept with Barlow Twins non-contrastive SSL, which leads to even better segmentation performance.
Submitted 29 July, 2024; originally announced July 2024.
Comments: Accepted at MIDL 2024
ACM Class: I.4.6; I.4.10
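The equivariance property that distinguishes Pix2Rep from image-level SSL can be written compactly: for a spatial transform T, dense features should satisfy f(T(x)) ≈ T(f(x)). The sketch below penalizes violations of this for a horizontal flip with an MSE term; the actual method uses a contrastive pixel-level loss, so treat this purely as an illustration of the property.

```python
import torch
import torch.nn.functional as F

def flip_equivariance_loss(encoder, images):
    """Encourage f(T(x)) ~= T(f(x)) for a horizontal flip T.

    encoder: maps (B, C, H, W) images to dense (B, D, H, W) features.
    A stand-in for the equivariance Pix2Rep enforces, not its loss."""
    feats = encoder(images)                                   # f(x)
    feats_of_flipped = encoder(torch.flip(images, dims=[-1])) # f(T(x))
    flipped_feats = torch.flip(feats, dims=[-1])              # T(f(x))
    return F.mse_loss(feats_of_flipped, flipped_feats)
```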
5. arXiv:2407.10696 [pdf, other]  cs.CV
Deep ContourFlow: Advancing Active Contours with Deep Learning
Authors: Antoine Habis, Vannary Meas-Yedid, Elsa Angelini, Jean-Christophe Olivo-Marin
Abstract: This paper introduces a novel approach that combines unsupervised active contour models with deep learning for robust and adaptive image segmentation. Traditional active contours provide a flexible framework for contour evolution, while deep learning offers the capacity to learn intricate features and patterns directly from raw data. Our proposed methodology leverages the strengths of both paradigms, presenting a framework for both unsupervised and one-shot image segmentation. It is capable of capturing complex object boundaries without the need for extensive labeled training data. This is particularly valuable in histology, a field facing a significant shortage of annotations due to the challenging and time-consuming nature of the annotation process. We illustrate and compare our results to state-of-the-art methods on a histology dataset and show significant improvements.
Submitted 15 July, 2024; originally announced July 2024.
Comments: 11 pages, 12 figures

6. arXiv:2407.04507 [pdf, other]  eess.IV, cs.CV, cs.LG  doi: 10.1109/ISBI56570.2024.10635527
Few-Shot Airway-Tree Modeling using Data-Driven Sparse Priors
Authors: Ali Keshavarzi, Elsa Angelini
Abstract: The lack of large annotated datasets in medical imaging is an intrinsic burden for supervised deep learning (DL) segmentation models. Few-shot learning approaches are cost-effective solutions for transferring pre-trained models using only limited annotated data. However, such methods can be prone to overfitting due to limited data diversity, especially when segmenting complex, diverse, and sparse tubular structures like airways. Furthermore, crafting informative image representations has played a crucial role in medical imaging, enabling discriminative enhancement of anatomical details. In this paper, we initially train a data-driven sparsification module to enhance airways efficiently in lung CT scans. We then incorporate these sparse representations in a standard supervised segmentation pipeline as a pretraining step to enhance the performance of the DL models. Results presented on the ATM public challenge cohort show the effectiveness of using sparse priors in pre-training, leading to segmentation Dice score increases of 1% and 10% in full-scale and few-shot learning scenarios, respectively.
Submitted 5 July, 2024; originally announced July 2024.
Comments: Accepted at the 21st IEEE International Symposium on Biomedical Imaging (ISBI)
MSC Class: 92C55; ACM Class: I.4.6

7. arXiv:2403.00257 [pdf]  cs.CV, cs.LG
Robust deep labeling of radiological emphysema subtypes using squeeze and excitation convolutional neural networks: The MESA Lung and SPIROMICS Studies
Authors: Artur Wysoczanski, Nabil Ettehadi, Soroush Arabshahi, Yifei Sun, Karen Hinkley Stukovsky, Karol E. Watson, MeiLan K. Han, Erin D. Michos, Alejandro P. Comellas, Eric A. Hoffman, Andrew F. Laine, R. Graham Barr, Elsa D. Angelini
Abstract: Pulmonary emphysema, the progressive, irreversible loss of lung tissue, is conventionally categorized into three subtypes identifiable on pathology and on lung computed tomography (CT) images. Recent work has led to the unsupervised learning of ten spatially-informed lung texture patterns (sLTPs) on lung CT, representing distinct patterns of emphysematous lung parenchyma based on both textural appearance and spatial location within the lung, which aggregate into six robust and reproducible CT emphysema subtypes (CTES). Existing methods for sLTP segmentation, however, are slow and highly sensitive to changes in CT acquisition protocol. In this work, we present a robust 3-D squeeze-and-excitation CNN for supervised classification of sLTPs and CTES on lung CT. Our results demonstrate that this model achieves accurate and reproducible sLTP segmentation on lung CT scans, across two independent cohorts and independently of scanner manufacturer and model.
Submitted 29 February, 2024; originally announced March 2024.
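For reference, a squeeze-and-excitation block (the building block named in the title above) recalibrates feature channels using globally pooled context. Below is a minimal 3-D variant as a sketch; the channel sizes and reduction ratio are illustrative choices, not the paper's configuration.

```python
import torch
import torch.nn as nn

class SEBlock3D(nn.Module):
    """3-D squeeze-and-excitation: reweight channels by global context."""
    def __init__(self, channels, reduction=8):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool3d(1)      # squeeze: global average
        self.fc = nn.Sequential(                 # excitation: channel gates
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):                        # x: (B, C, D, H, W)
        b, c = x.shape[:2]
        w = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1, 1)
        return x * w                             # channel-wise recalibration
```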
8. arXiv:2402.18383 [pdf]  cs.CV
Robust Quantification of Percent Emphysema on CT via Domain Attention: the Multi-Ethnic Study of Atherosclerosis (MESA) Lung Study
Authors: Xuzhe Zhang, Elsa D. Angelini, Eric A. Hoffman, Karol E. Watson, Benjamin M. Smith, R. Graham Barr, Andrew F. Laine
Abstract: Robust quantification of pulmonary emphysema on computed tomography (CT) remains challenging for large-scale research studies that involve scans from different scanner types and for translation to clinical scans. Existing studies have explored several directions to tackle this challenge, including density correction, noise filtering, regression, hidden Markov measure field (HMMF) model-based segmentation, and volume-adjusted lung density. Despite some promising results, previous studies either required a tedious workflow or offered limited opportunities for downstream emphysema subtyping, limiting efficient adaptation in large-scale studies. To alleviate this dilemma, we developed an end-to-end deep learning framework based on an existing HMMF segmentation framework. We first demonstrate that a regular U-Net cannot replicate the existing HMMF results because it lacks scanner priors. We then design a novel domain attention block to fuse image features with quantitative scanner priors, which significantly improves the results.
Submitted 6 March, 2024; v1 submitted 28 February, 2024; originally announced February 2024.
Comments: 5 pages, 5 figures. Accepted to the IEEE International Symposium on Biomedical Imaging 2024 (ISBI 2024). Camera-ready version
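A minimal sketch of fusing quantitative scanner priors into image features is shown below, assuming a FiLM-style per-channel modulation; this is a stand-in for, not a reproduction of, the paper's domain attention block, and the prior vector is a hypothetical input.

```python
import torch
import torch.nn as nn

class ScannerPriorFusion(nn.Module):
    """Condition feature maps on a vector of scanner metadata.

    Illustrative stand-in for a 'domain attention' block: an MLP maps
    quantitative scanner priors (e.g., kernel or dose descriptors) to
    per-channel scale/shift applied to the image features."""
    def __init__(self, channels, n_priors):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(n_priors, channels), nn.ReLU(inplace=True),
            nn.Linear(channels, 2 * channels),
        )

    def forward(self, feats, priors):   # feats: (B, C, H, W); priors: (B, P)
        gamma, beta = self.mlp(priors).chunk(2, dim=-1)
        return feats * gamma[..., None, None] + beta[..., None, None]
```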
9. arXiv:2402.08333 [pdf, other]  cs.CV
Scribble-based fast weak-supervision and interactive corrections for segmenting whole slide images
Authors: Antoine Habis, Roy Rosman Nathanson, Vannary Meas-Yedid, Elsa D. Angelini, Jean-Christophe Olivo-Marin
Abstract: This paper proposes a dynamic, interactive, and weakly supervised segmentation method with minimal user interactions to address two major challenges in the segmentation of whole-slide histopathology images: first, the lack of hand-annotated datasets to train algorithms; second, the lack of interactive paradigms enabling a dialogue between the pathologist and the machine, which can be a major obstacle for use in clinical routine. We therefore propose a fast and user-oriented method to bridge this gap by giving the pathologist control over the final result while limiting the number of interactions needed to achieve a good result (over 90% on all our metrics with only 4 correction scribbles).
Submitted 13 February, 2024; originally announced February 2024.

10. arXiv:2303.09373 [pdf, other]  cs.CV, cs.AI, cs.LG
MAPSeg: Unified Unsupervised Domain Adaptation for Heterogeneous Medical Image Segmentation Based on 3D Masked Autoencoding and Pseudo-Labeling
Authors: Xuzhe Zhang, Yuhao Wu, Elsa Angelini, Ang Li, Jia Guo, Jerod M. Rasmussen, Thomas G. O'Connor, Pathik D. Wadhwa, Andrea Parolin Jackowski, Hai Li, Jonathan Posner, Andrew F. Laine, Yun Wang
Abstract: Robust segmentation is critical for deriving quantitative measures from large-scale, multi-center, and longitudinal medical scans. Manually annotating medical scans, however, is expensive and labor-intensive, and annotations may not always be available in every domain. Unsupervised domain adaptation (UDA) is a well-studied technique that alleviates this label-scarcity problem by leveraging available labels from another domain. In this study, we introduce Masked Autoencoding and Pseudo-Labeling Segmentation (MAPSeg), a unified UDA framework with great versatility and superior performance for heterogeneous and volumetric medical image segmentation. To the best of our knowledge, this is the first study that systematically reviews and develops a framework to tackle four different domain shifts in medical image segmentation. More importantly, MAPSeg is the first framework that can be applied to centralized, federated, and test-time UDA while maintaining comparable performance. We compare MAPSeg with previous state-of-the-art methods on a private infant brain MRI dataset and a public cardiac CT-MRI dataset, where MAPSeg outperforms others by a large margin (10.5 Dice improvement on the private MRI dataset and 5.7 on the public CT-MRI dataset). MAPSeg poses great practical value and can be applied to real-world problems. GitHub: https://github.com/XuzheZ/MAPSeg/
Submitted 30 March, 2024; v1 submitted 16 March, 2023; originally announced March 2023.
Comments: CVPR 2024 camera-ready (8 pages, 3 figures) with supplemental materials (5 pages, 4 figures). Xuzhe Zhang and Yuhao Wu are co-first authors. Andrew F. Laine and Yun Wang are co-senior supervising authors
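The combination of masked autoencoding with an EMA-teacher pseudo-labeling loop can be caricatured in a few lines. The masking scheme, loss weighting, and function names below are illustrative assumptions rather than the authors' training recipe (their GitHub repository has the real one).

```python
import torch
import torch.nn.functional as F

def masked_pseudo_label_step(student, teacher, src_x, src_y, tgt_x,
                             mask_ratio=0.7, patch=8, alpha=0.999):
    """One illustrative UDA step: supervised source loss plus a
    masked-image pseudo-label loss on the target domain.
    teacher: EMA copy of student (e.g., copy.deepcopy(student) at start).
    src_x/src_y: labeled source volumes; tgt_x: unlabeled target volumes."""
    # Supervised loss on the labeled source domain.
    loss = F.cross_entropy(student(src_x), src_y)

    # EMA teacher predicts pseudo-labels on the intact target volume.
    with torch.no_grad():
        pseudo = teacher(tgt_x).argmax(1)

    # Student sees a heavily masked target volume (coarse 3-D patch mask).
    mask = (torch.rand(tgt_x.shape[0], 1,
                       *[s // patch for s in tgt_x.shape[2:]],
                       device=tgt_x.device) > mask_ratio).float()
    mask = F.interpolate(mask, size=tgt_x.shape[2:], mode="nearest")
    loss = loss + F.cross_entropy(student(tgt_x * mask), pseudo)

    # EMA update of the teacher.
    with torch.no_grad():
        for pt, ps in zip(teacher.parameters(), student.parameters()):
            pt.mul_(alpha).add_(ps, alpha=1 - alpha)
    return loss
```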
11. arXiv:2206.04238 [pdf, other]  eess.IV, cs.CV
Cardiac Adipose Tissue Segmentation via Image-Level Annotations
Authors: Ziyi Huang, Yu Gan, Theresa Lye, Yanchen Liu, Haofeng Zhang, Andrew Laine, Elsa Angelini, Christine Hendon
Abstract: Automatically identifying the structural substrates underlying cardiac abnormalities can potentially provide real-time guidance for interventional procedures. With knowledge of cardiac tissue substrates, the treatment of complex arrhythmias such as atrial fibrillation and ventricular tachycardia can be further optimized by detecting arrhythmia substrates to target for treatment (i.e., adipose) and identifying critical structures to avoid. Optical coherence tomography (OCT) is a real-time imaging modality that aids in addressing this need. Existing approaches for cardiac image analysis rely mainly on fully supervised learning techniques, which suffer from the workload of a labor-intensive, pixel-wise labeling process. To lessen the need for pixel-wise labeling, we develop a two-stage deep learning framework for cardiac adipose tissue segmentation using image-level annotations on OCT images of human cardiac substrates. In particular, we integrate class activation mapping with superpixel segmentation to solve the sparse tissue seed challenge raised in cardiac tissue segmentation. Our study bridges the gap between the demand for automatic tissue analysis and the lack of high-quality pixel-wise annotations. To the best of our knowledge, this is the first study that attempts to address cardiac tissue segmentation on OCT images via weakly supervised learning techniques. Within an in-vitro human cardiac OCT dataset, we demonstrate that our weakly supervised approach on image-level annotations achieves performance comparable to fully supervised methods trained on pixel-wise annotations.
Submitted 8 June, 2022; originally announced June 2022.
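The CAM-plus-superpixel step can be sketched as follows: average a coarse class activation map over SLIC superpixels and promote high-scoring superpixels to seed regions. The threshold and parameters are illustrative; the paper's actual seeding rule may differ.

```python
import numpy as np
from skimage.segmentation import slic

def superpixel_seeds(image, cam, n_segments=200, threshold=0.6):
    """Turn a coarse class activation map (CAM) into cleaner seed labels
    by averaging it over SLIC superpixels (illustrative sketch).

    image: (H, W) grayscale OCT slice; cam: (H, W) scores in [0, 1].
    Returns a binary seed mask aligned with superpixel boundaries."""
    segments = slic(image, n_segments=n_segments, channel_axis=None)
    seeds = np.zeros_like(cam, dtype=bool)
    for sp in np.unique(segments):
        region = segments == sp
        # Promote a whole superpixel to a seed if its mean CAM is high.
        if cam[region].mean() > threshold:
            seeds |= region
    return seeds
```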
arXiv:2206.01014  [pdf, other]  cs.CV cs.AI
Suggestive Annotation of Brain MR Images with Gradient-guided Sampling
Authors: Chengliang Dai, Shuo Wang, Yuanhan Mo, Elsa Angelini, Yike Guo, Wenjia Bai
Abstract: Machine learning has been widely adopted for medical image analysis in recent years given its promising performance in image segmentation and classification tasks. The success of machine learning, in particular supervised learning, depends on the availability of manually annotated datasets. For medical imaging applications, such annotated datasets are not easy to acquire; it takes substantial time and resources to curate an annotated medical image set. In this paper, we propose an efficient annotation framework for brain MR images that can suggest informative sample images for human experts to annotate. We evaluate the framework on two different brain image analysis tasks: brain tumour segmentation and whole brain segmentation. Experiments show that for the brain tumour segmentation task on the BraTS 2019 dataset, training a segmentation model with only 7% suggestively annotated image samples achieves performance comparable to training on the full dataset. For whole brain segmentation on the MALC dataset, training with 42% suggestively annotated image samples achieves comparable performance to training on the full dataset. The proposed framework demonstrates a promising way to save manual annotation cost and improve data efficiency in medical imaging applications.
Submitted 2 June, 2022; originally announced June 2022.
Comments: Manuscript accepted by MedIA
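As a rough illustration of gradient-guided sample suggestion (a sketch of the general idea, not the paper's exact sampling procedure; the pseudo-labeling choice, `model` and `loss_fn` are assumptions):

```python
import torch

def gradient_score(model, image, loss_fn):
    """Score one candidate image by the gradient magnitude it induces."""
    model.zero_grad()
    pred = model(image)                   # image: (1, C, H, W) tensor, pred: logits
    pseudo = (pred.detach() > 0).float()  # assumption: hard self-prediction as pseudo-label
    loss_fn(pred, pseudo).backward()
    return float(sum(p.grad.norm() for p in model.parameters() if p.grad is not None))

def suggest(model, pool, loss_fn, k=10):
    """Return indices of the k highest-scoring images in the unlabeled pool."""
    scores = [(gradient_score(model, x, loss_fn), i) for i, x in enumerate(pool)]
    return [i for _, i in sorted(scores, reverse=True)[:k]]
```

The intuition is that images the current model is confident about induce small gradients, while ambiguous ones would change the model the most and are therefore worth annotating first.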
href="/search/cs?searchtype=author&amp;query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&amp;query=Radojewski%2C+P">Piotr Radojewski</a>, <a href="/search/cs?searchtype=author&amp;query=Murugesan%2C+G+K">Gowtham Krishnan Murugesan</a>, <a href="/search/cs?searchtype=author&amp;query=Nalawade%2C+S">Sahil Nalawade</a>, <a href="/search/cs?searchtype=author&amp;query=Ganesh%2C+C">Chandan Ganesh</a>, <a href="/search/cs?searchtype=author&amp;query=Wagner%2C+B">Ben Wagner</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+F+F">Fang F. Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Fei%2C+B">Baowei Fei</a>, <a href="/search/cs?searchtype=author&amp;query=Madhuranthakam%2C+A+J">Ananth J. Madhuranthakam</a>, <a href="/search/cs?searchtype=author&amp;query=Maldjian%2C+J+A">Joseph A. Maldjian</a>, <a href="/search/cs?searchtype=author&amp;query=Daza%2C+L">Laura Daza</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+C">Catalina Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Arbelaez%2C+P">Pablo Arbelaez</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+C">Chengliang Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shuo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Reynaud%2C+H">Hadrien Reynaud</a>, <a href="/search/cs?searchtype=author&amp;query=Mo%2C+Y">Yuan-han Mo</a>, <a href="/search/cs?searchtype=author&amp;query=Angelini%2C+E">Elsa Angelini</a> , et al. (67 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.10074v2-abstract-short" style="display: inline;"> Deep learning (DL) models have provided state-of-the-art performance in various medical imaging benchmarking challenges, including the Brain Tumor Segmentation (BraTS) challenges. However, the task of focal pathology multi-compartment segmentation (e.g., tumor and lesion sub-regions) is particularly challenging, and potential errors hinder translating DL models into clinical workflows. Quantifying&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.10074v2-abstract-full').style.display = 'inline'; document.getElementById('2112.10074v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.10074v2-abstract-full" style="display: none;"> Deep learning (DL) models have provided state-of-the-art performance in various medical imaging benchmarking challenges, including the Brain Tumor Segmentation (BraTS) challenges. However, the task of focal pathology multi-compartment segmentation (e.g., tumor and lesion sub-regions) is particularly challenging, and potential errors hinder translating DL models into clinical workflows. Quantifying the reliability of DL model predictions in the form of uncertainties could enable clinical review of the most uncertain regions, thereby building trust and paving the way toward clinical translation. Several uncertainty estimation methods have recently been introduced for DL medical image segmentation tasks. Developing scores to evaluate and compare the performance of uncertainty measures will assist the end-user in making more informed decisions. 
In this study, we explore and evaluate a score developed during the BraTS 2019 and BraTS 2020 task on uncertainty quantification (QU-BraTS) and designed to assess and rank uncertainty estimates for brain tumor multi-compartment segmentation. This score (1) rewards uncertainty estimates that produce high confidence in correct assertions and those that assign low confidence levels at incorrect assertions, and (2) penalizes uncertainty measures that lead to a higher percentage of under-confident correct assertions. We further benchmark the segmentation uncertainties generated by 14 independent participating teams of QU-BraTS 2020, all of which also participated in the main BraTS segmentation task. Overall, our findings confirm the importance and complementary value that uncertainty estimates provide to segmentation algorithms, highlighting the need for uncertainty quantification in medical image analyses. Finally, in favor of transparency and reproducibility, our evaluation code is made publicly available at: https://github.com/RagMeh11/QU-BraTS. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.10074v2-abstract-full').style.display = 'none'; document.getElementById('2112.10074v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at the Journal of Machine Learning for Biomedical Imaging (MELBA): https://www.melba-journal.org/papers/2022:026.html</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine.Learning.for.Biomedical.Imaging. 
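The flavor of such a score can be seen in a simplified numpy sketch (the exact QU-BraTS protocol is defined in the paper and the repository above; the thresholds and aggregation here are placeholders): sweep uncertainty thresholds, keep only voxels the model is confident about, and track both the Dice on the retained voxels and the fraction of correct voxels that were thrown away.

```python
import numpy as np

def dice(pred, gt):
    inter = np.logical_and(pred, gt).sum()
    return 2.0 * inter / max(pred.sum() + gt.sum(), 1)

def confidence_filtered_curve(pred, gt, unc, thresholds=np.linspace(0.05, 1.0, 20)):
    """pred, gt: boolean voxel masks; unc: voxel-wise uncertainty in [0, 1]."""
    curve = []
    for t in thresholds:
        keep = unc <= t                                    # retain confident voxels only
        d = dice(pred[keep], gt[keep])                     # Dice on retained voxels
        thrown = np.logical_and(~keep, pred == gt).mean()  # correct voxels filtered out
        curve.append((t, d, thrown))
    return curve
```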
arXiv:2106.07608  [pdf, other]  eess.IV cs.AI cs.CV cs.LG
Recursive Refinement Network for Deformable Lung Registration between Exhale and Inhale CT Scans
Authors: Xinzi He, Jia Guo, Xuzhe Zhang, Hanwen Bi, Sarah Gerard, David Kaczka, Amin Motahari, Eric Hoffman, Joseph Reinhardt, R. Graham Barr, Elsa Angelini, Andrew Laine
Abstract: Unsupervised learning-based medical image registration approaches have witnessed rapid development in recent years. We propose to revisit a commonly ignored yet simple and well-established principle: recursive refinement of deformation vector fields across scales. We introduce a recursive refinement network (RRN) for unsupervised medical image registration that extracts multi-scale features, constructs normalized local cost correlation volumes, and recursively refines volumetric deformation vector fields.
RRN achieves state-of-the-art performance for 3D registration of expiratory-inspiratory pairs of CT lung scans. On the DirLab COPDGene dataset, RRN returns an average Target Registration Error (TRE) of 0.83 mm, a 13% error reduction from the best result on the leaderboard. In addition to improving on conventional methods, RRN reduces error by 89% compared to deep-learning-based peer approaches.
Submitted 14 June, 2021; originally announced June 2021.
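The underlying coarse-to-fine principle, independent of the RRN architecture itself, can be sketched as follows (`estimate_delta` stands in for a learned per-scale update module and is an assumption, not a description of the paper's network):

```python
import torch
import torch.nn.functional as F

def refine_flow(feats_fixed, feats_moving, estimate_delta):
    """feats_*: feature pyramids from coarse to fine, each (N, C, D, H, W)."""
    flow = None
    for f_fix, f_mov in zip(feats_fixed, feats_moving):
        if flow is None:
            flow = torch.zeros(f_fix.shape[0], 3, *f_fix.shape[2:])
        else:
            # upsample the coarser field; displacements double with resolution
            flow = 2.0 * F.interpolate(flow, size=f_fix.shape[2:],
                                       mode='trilinear', align_corners=False)
        flow = flow + estimate_delta(f_fix, f_mov, flow)  # residual refinement per scale
    return flow
```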
arXiv:2102.00523  [pdf, ps, other]  cs.LG cs.CV
Co-Seg: An Image Segmentation Framework Against Label Corruption
Authors: Ziyi Huang, Haofeng Zhang, Andrew Laine, Elsa Angelini, Christine Hendon, Yu Gan
Abstract: Supervised deep learning performance is heavily tied to the availability of high-quality labels for training. Neural networks can gradually overfit corrupted labels if trained directly on noisy datasets, leading to severe performance degradation at test time. In this paper, we propose a novel deep learning framework, Co-Seg, to collaboratively train segmentation networks on datasets that include low-quality noisy labels. Our approach first trains two networks simultaneously to sift through all samples and obtain a subset with reliable labels. Then, an efficient yet easily implemented label correction strategy is applied to enrich the reliable subset. Finally, using the updated dataset, we retrain the segmentation network to finalize its parameters. Experiments in two noisy-label scenarios demonstrate that our proposed model achieves results comparable to supervised learning on noise-free labels. In addition, our framework can be incorporated into any segmentation algorithm to increase its robustness to noisy labels.
Submitted 31 January, 2021; originally announced February 2021.
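The sample-sifting stage rests on the small-loss trick common to co-training methods: noisy labels tend to produce large losses early in training, so each network keeps the samples its peer finds easiest. A minimal sketch (the keep ratio is an assumption):

```python
import torch

def select_reliable(losses_a, losses_b, keep_ratio=0.8):
    """losses_*: (B,) per-sample losses from the two networks."""
    k = max(1, int(keep_ratio * losses_a.numel()))
    idx_for_b = torch.argsort(losses_a)[:k]  # net A picks small-loss samples for net B
    idx_for_a = torch.argsort(losses_b)[:k]  # and vice versa
    return idx_for_a, idx_for_b
```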
arXiv:2007.04978  [pdf, other]  cs.CV cs.LG
Novel Subtypes of Pulmonary Emphysema Based on Spatially-Informed Lung Texture Learning
Authors: Jie Yang, Elsa D. Angelini, Pallavi P. Balte, Eric A. Hoffman, John H. M. Austin, Benjamin M. Smith, R. Graham Barr, Andrew F. Laine
Abstract: Pulmonary emphysema overlaps considerably with chronic obstructive pulmonary disease (COPD) and is traditionally subcategorized into three subtypes previously identified on autopsy. Unsupervised learning of emphysema subtypes on computed tomography (CT) opens the way to new definitions of emphysema subtypes and eliminates the need for thorough manual labeling. However, CT-based emphysema subtypes have been limited to texture-based patterns without considering spatial location. In this work, we introduce a standardized spatial mapping of the lung for quantitative study of lung texture location, and propose a novel framework for combining spatial and texture information to discover spatially-informed lung texture patterns (sLTPs) that represent novel emphysema subtypes. Exploiting two cohorts of full-lung CT scans from the MESA COPD and EMCAP studies, we first show that our spatial mapping enables population-wide study of emphysema spatial location. We then evaluate the characteristics of the sLTPs discovered on MESA COPD, and show that they are reproducible, able to encode standard emphysema subtypes, and associated with physiological symptoms.
Submitted 9 July, 2020; originally announced July 2020.
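As a toy illustration of combining texture with a standardized spatial position before unsupervised pattern discovery (the real sLTP pipeline is more involved; the weighting `alpha` and the use of k-means are assumptions):

```python
import numpy as np
from sklearn.cluster import KMeans

def spatially_informed_clusters(texture, coords, n_clusters=10, alpha=0.5):
    """texture: (N, F) texture features; coords: (N, 3) lung-standardized positions."""
    x = np.hstack([texture, alpha * coords])  # joint texture + location feature
    return KMeans(n_clusters=n_clusters, n_init=10).fit_predict(x)
```

Weighting the spatial coordinates controls how strongly location, as opposed to texture alone, drives the discovered patterns.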
arXiv:2006.14984  [pdf, other]  cs.CV cs.LG eess.IV
Suggestive Annotation of Brain Tumour Images with Gradient-guided Sampling
Authors: Chengliang Dai, Shuo Wang, Yuanhan Mo, Kaichen Zhou, Elsa Angelini, Yike Guo, Wenjia Bai
Abstract: Machine learning has been widely adopted for medical image analysis in recent years given its promising performance in image segmentation and classification tasks. As a data-driven science, the success of machine learning, in particular supervised learning, largely depends on the availability of manually annotated datasets. For medical imaging applications, such annotated datasets are not easy to acquire; it takes substantial time and resources to curate an annotated medical image set. In this paper, we propose an efficient annotation framework for brain tumour images that is able to suggest informative sample images for human experts to annotate. Our experiments show that training a segmentation model with only 19% suggestively annotated patient scans from the BraTS 2019 dataset can achieve performance comparable to training on the full dataset for the whole tumour segmentation task. It demonstrates a promising way to save manual annotation cost and improve data efficiency in medical imaging applications.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.14984v2-abstract-full').style.display = 'none'; document.getElementById('2006.14984v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Paper accepted by MICCAI 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.00440">arXiv:2002.00440</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2002.00440">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Simultaneous Left Atrium Anatomy and Scar Segmentations via Deep Learning in Multiview Information with Attention </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+G">Guang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jun Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+Z">Zhifan Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+S">Shuo Li</a>, <a href="/search/cs?searchtype=author&amp;query=Ni%2C+H">Hao Ni</a>, <a href="/search/cs?searchtype=author&amp;query=Angelini%2C+E">Elsa Angelini</a>, <a href="/search/cs?searchtype=author&amp;query=Wong%2C+T">Tom Wong</a>, <a href="/search/cs?searchtype=author&amp;query=Mohiaddin%2C+R">Raad Mohiaddin</a>, <a href="/search/cs?searchtype=author&amp;query=Nyktari%2C+E">Eva Nyktari</a>, <a href="/search/cs?searchtype=author&amp;query=Wage%2C+R">Ricardo Wage</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+L">Lei Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yanping Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+X">Xiuquan Du</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Heye Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Firmin%2C+D">David Firmin</a>, <a href="/search/cs?searchtype=author&amp;query=Keegan%2C+J">Jennifer Keegan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.00440v1-abstract-short" style="display: inline;"> Three-dimensional late gadolinium enhanced (LGE) cardiac MR (CMR) of left atrial scar in patients with atrial fibrillation (AF) has recently emerged as a promising technique to stratify patients, to guide ablation therapy and to predict treatment success. 
This requires a segmentation of the high-intensity scar tissue and also a segmentation of the left atrium (LA) anatomy, the latter usually derived from a separate bright-blood acquisition. Performing both segmentations automatically from a single 3D LGE CMR acquisition would eliminate the need for an additional acquisition and avoid subsequent registration issues. In this paper, we propose a joint segmentation method based on a multiview two-task (MVTT) recursive attention model working directly on 3D LGE CMR images to segment the LA (and proximal pulmonary veins) and to delineate the scar on the same dataset. Using our MVTT recursive attention model, both the LA anatomy and scar can be segmented accurately (mean Dice score of 93% for the LA anatomy and 87% for the scar segmentations) and efficiently (~0.27 seconds to simultaneously segment the LA anatomy and scars directly from a 3D LGE CMR dataset with 60-68 2D slices). Compared to conventional unsupervised learning and other state-of-the-art deep learning based methods, the proposed MVTT model achieved excellent results, leading to the automatic generation of a patient-specific anatomical model combined with scar segmentation for patients in AF.
Submitted 2 February, 2020; originally announced February 2020.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">34 pages, 10 figures, 7 tables, accepted by Future Generation Computer Systems journal</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.08483">arXiv:1911.08483</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1911.08483">pdf</a>, <a href="https://arxiv.org/format/1911.08483">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Automatic Brain Tumour Segmentation and Biophysics-Guided Survival Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shuo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+C">Chengliang Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Mo%2C+Y">Yuanhan Mo</a>, <a href="/search/cs?searchtype=author&amp;query=Angelini%2C+E">Elsa Angelini</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+Y">Yike Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+W">Wenjia Bai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.08483v1-abstract-short" style="display: inline;"> Gliomas are the most common malignant brain tumourswith intrinsic heterogeneity. Accurate segmentation of gliomas and theirsub-regions on multi-parametric magnetic resonance images (mpMRI)is of great clinical importance, which defines tumour size, shape andappearance and provides abundant information for preoperative diag-nosis, treatment planning and survival prediction. Recent developmentson dee&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.08483v1-abstract-full').style.display = 'inline'; document.getElementById('1911.08483v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.08483v1-abstract-full" style="display: none;"> Gliomas are the most common malignant brain tumourswith intrinsic heterogeneity. Accurate segmentation of gliomas and theirsub-regions on multi-parametric magnetic resonance images (mpMRI)is of great clinical importance, which defines tumour size, shape andappearance and provides abundant information for preoperative diag-nosis, treatment planning and survival prediction. Recent developmentson deep learning have significantly improved the performance of auto-mated medical image segmentation. In this paper, we compare severalstate-of-the-art convolutional neural network models for brain tumourimage segmentation. Based on the ensembled segmentation, we presenta biophysics-guided prognostic model for patient overall survival predic-tion which outperforms a data-driven radiomics approach. Our methodwon the second place of the MICCAI 2019 BraTS Challenge for theoverall survival prediction. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.08483v1-abstract-full').style.display = 'none'; document.getElementById('1911.08483v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">MICCAI BraTS 2019 Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1908.10851">arXiv:1908.10851</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1908.10851">pdf</a>, <a href="https://arxiv.org/format/1908.10851">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Transfer Learning from Partial Annotations for Whole Brain Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dai%2C+C">Chengliang Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Mo%2C+Y">Yuanhan Mo</a>, <a href="/search/cs?searchtype=author&amp;query=Angelini%2C+E">Elsa Angelini</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+Y">Yike Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+W">Wenjia Bai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1908.10851v1-abstract-short" style="display: inline;"> Brain MR image segmentation is a key task in neuroimaging studies. It is commonly conducted using standard computational tools, such as FSL, SPM, multi-atlas segmentation etc, which are often registration-based and suffer from expensive computation cost. Recently, there is an increased interest using deep neural networks for brain image segmentation, which have demonstrated advantages in both spee&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.10851v1-abstract-full').style.display = 'inline'; document.getElementById('1908.10851v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1908.10851v1-abstract-full" style="display: none;"> Brain MR image segmentation is a key task in neuroimaging studies. It is commonly conducted using standard computational tools, such as FSL, SPM, multi-atlas segmentation etc, which are often registration-based and suffer from expensive computation cost. Recently, there is an increased interest using deep neural networks for brain image segmentation, which have demonstrated advantages in both speed and performance. However, neural networks-based approaches normally require a large amount of manual annotations for optimising the massive amount of network parameters. For 3D networks used in volumetric image segmentation, this has become a particular challenge, as a 3D network consists of many more parameters compared to its 2D counterpart. 
Manual annotation of 3D brain images is extremely time-consuming and requires extensive involvement of trained experts. To address the challenge of limited manual annotations, here we propose a novel multi-task learning framework for brain image segmentation, which utilises a large amount of automatically generated partial annotations together with a small set of manually created full annotations for network training. Our method yields performance comparable to state-of-the-art methods for whole brain segmentation.
Submitted 28 August, 2019; originally announced August 2019.
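One simple way to picture training on a mix of full and partial annotations, in the spirit of the framework above (a sketch under assumptions, not the paper's implementation), is to mask the loss so only voxels with an available label, manual or auto-generated, contribute:

```python
import torch
import torch.nn.functional as F

def partial_ce_loss(logits, labels, label_mask):
    """logits: (N, K, ...); labels: (N, ...) class indices;
    label_mask: (N, ...) bool, True where an annotation is available."""
    loss = F.cross_entropy(logits, labels, reduction='none')
    return (loss * label_mask).sum() / label_mask.sum().clamp(min=1)
```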
arXiv:1902.02629  [pdf, other]  eess.IV cs.LG stat.ML
SAPSAM - Sparsely Annotated Pathological Sign Activation Maps - A novel approach to train Convolutional Neural Networks on lung CT scans using binary labels only
Authors: Mario Zusag, Sujal Desai, Marcello Di Paolo, Thomas Semple, Anand Shah, Elsa Angelini
Abstract: Chronic Pulmonary Aspergillosis (CPA) is a complex lung disease caused by infection with Aspergillus. Computed tomography (CT) images are frequently requested in patients with suspected and established disease, but the radiological signs on CT are difficult to quantify, making accurate follow-up challenging. We propose a novel method to train convolutional neural networks using only regional labels on the presence of pathological signs, to not only detect CPA but also spatially localize pathological signs. We use average intensity projections within different ranges of Hounsfield-unit (HU) values, transforming input 3D CT scans into 2D RGB-like images. CNN architectures are trained for hierarchical tasks, leading to precise activation maps of pathological patterns. Results on a cohort of 352 subjects demonstrate high classification accuracy, localization precision and predictive power for 2-year survival. Such a tool opens the way to CPA patient stratification and quantitative follow-up of CPA pathological signs for patients under drug therapy.
Submitted 6 February, 2019; originally announced February 2019.
Comments: Accepted paper for ISBI 2019
Journal ref: https://biomedicalimaging.org/2019/
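The projection step lends itself to a compact numpy sketch (the three HU windows below are illustrative, not the paper's choices):

```python
import numpy as np

def hu_projection(ct, ranges=((-1000, -500), (-500, 0), (0, 500)), axis=0):
    """ct: (D, H, W) volume in Hounsfield units; returns an (H, W, 3) float image."""
    channels = []
    for lo, hi in ranges:
        clipped = np.clip(ct, lo, hi)             # keep only this HU window
        proj = clipped.mean(axis=axis)            # average intensity projection
        channels.append((proj - lo) / (hi - lo))  # normalize channel to [0, 1]
    return np.stack(channels, axis=-1)
```

Each HU window emphasizes a different tissue class, so the stacked projections give a 2D classifier complementary views of the volume.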
arXiv:1812.07749  [pdf, other]  cs.CV
Discriminative analysis of the human cortex using spherical CNNs - a study on Alzheimer's disease diagnosis
Authors: Xinyang Feng, Jie Yang, Andrew F. Laine, Elsa D. Angelini
Abstract: In neuroimaging studies, the human cortex is commonly modeled as a sphere to preserve the topological structure of the cortical surface. Cortical neuroimaging measures can hence be modeled in spherical representation. In this work, we explore analyzing the human cortex using spherical CNNs in an Alzheimer's disease (AD) classification task using cortical morphometric measures derived from structural MRI. Our results show superior performance in classifying AD versus cognitively normal subjects and in predicting MCI progression within two years, using structural MRI information only. This work demonstrates for the first time the potential of the spherical CNN framework for the discriminative analysis of the human cortex, and could be extended to other modalities and other neurological diseases.
Submitted 18 December, 2018; originally announced December 2018.
arXiv:1806.04597  [pdf, other]  cs.CV eess.IV
Multiview Two-Task Recursive Attention Model for Left Atrium and Atrial Scars Segmentation
Authors: Jun Chen, Guang Yang, Zhifan Gao, Hao Ni, Elsa Angelini, Raad Mohiaddin, Tom Wong, Yanping Zhang, Xiuquan Du, Heye Zhang, Jennifer Keegan, David Firmin
Abstract: Late Gadolinium Enhanced Cardiac MRI (LGE-CMRI) for detecting atrial scars in atrial fibrillation (AF) patients has recently emerged as a promising technique to stratify patients, guide ablation therapy and predict treatment success. Visualisation and quantification of scar tissues require a segmentation of both the left atrium (LA) and the high-intensity scar regions from LGE-CMRI images. These two segmentation tasks are challenging due to the cancelling of healthy tissue signal, low signal-to-noise ratio and often limited image quality in these patients. Most approaches require manual supervision and/or a second bright-blood MRI acquisition for anatomical segmentation. Segmenting both the LA anatomy and the scar tissues automatically from a single LGE-CMRI acquisition is highly in demand.
In this study, we propose a novel fully automated multiview two-task (MVTT) recursive attention model working directly on LGE-CMRI images that combines sequential learning and dilated residual learning to segment the LA (including attached pulmonary veins) and delineate the atrial scars simultaneously via an innovative attention model. Compared to other state-of-the-art methods, the proposed MVTT achieves compelling improvement, enabling the generation of a patient-specific anatomical and atrial scar assessment model.
Submitted 12 June, 2018; originally announced June 2018.
Comments: 8 pages, 4 figures, accepted by MICCAI 2018

arXiv:1707.01086  [pdf, other]  cs.CV
DOI: 10.1007/978-3-319-66179-7_65
Discriminative Localization in CNNs for Weakly-Supervised Segmentation of Pulmonary Nodules
Authors: Xinyang Feng, Jie Yang, Andrew F. Laine, Elsa D. Angelini
Abstract: Automated detection and segmentation of pulmonary nodules on lung computed tomography (CT) scans can facilitate early lung cancer diagnosis. Existing supervised approaches for automated nodule segmentation on CT scans require voxel-based annotations for training, which are labor- and time-consuming to obtain.
In this work, we propose a weakly-supervised method that generates accurate voxel-level nodule segmentation trained with image-level labels only. By adapting a convolutional neural network (CNN) trained for image classification, our proposed method learns discriminative regions from the activation maps of convolution units at different scales, and identifies the true nodule location with a novel candidate-screening framework. Experimental results on the public LIDC-IDRI dataset demonstrate that our weakly-supervised nodule segmentation framework achieves competitive performance compared to a fully-supervised CNN-based segmentation method.
Submitted 22 February, 2018; v1 submitted 4 July, 2017; originally announced July 2017.
Journal ref: International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2017
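The discriminative-localization backbone of this approach is the classic class activation map, computable directly from the last convolutional features and the classifier weights (a generic sketch; the paper adds multi-scale maps and candidate screening on top):

```python
import torch

def class_activation_map(feature_maps, fc_weight, class_idx):
    """feature_maps: (C, H, W) from the last conv layer; fc_weight: (num_classes, C)."""
    w = fc_weight[class_idx]                          # weights for the target class, (C,)
    cam = torch.einsum('c,chw->hw', w, feature_maps)  # weighted sum over channels
    cam = torch.relu(cam)
    return cam / cam.max().clamp(min=1e-8)            # normalize to [0, 1]
```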
arXiv:1612.01820  [pdf, other]  cs.CV
Explaining Radiological Emphysema Subtypes with Unsupervised Texture Prototypes: MESA COPD Study
Authors: Jie Yang, Elsa D. Angelini, Benjamin M. Smith, John H. M. Austin, Eric A. Hoffman, David A. Bluemke, R. Graham Barr, Andrew F. Laine
Abstract: Pulmonary emphysema is traditionally subcategorized into three subtypes, which have distinct radiological appearances on computed tomography (CT) and can help with the diagnosis of chronic obstructive pulmonary disease (COPD). Automated texture-based quantification of emphysema subtypes has been successfully implemented via supervised learning of these three emphysema subtypes. In this work, we demonstrate that unsupervised learning on a large heterogeneous database of CT scans can generate texture prototypes that are visually homogeneous and distinct, reproducible across subjects, and capable of accurately predicting the three standard radiological subtypes. These texture prototypes enable automated labeling of lung volumes, and open the way to new interpretations of lung CT scans with finer subtyping of emphysema.
Submitted 5 December, 2016; originally announced December 2016.
Comments: MICCAI workshop on Medical Computer Vision: Algorithms for Big Data (2016)
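The unsupervised pipeline this abstract describes, clustering texture descriptors from many scans into recurring and reproducible prototypes, can be illustrated with a toy sketch. Everything specific here is an invented placeholder: the histogram-plus-gradient descriptor, the 32-pixel patch size, and k=10 are not the MESA COPD study's actual features or settings.

    # Illustrative sketch: learn "texture prototypes" by clustering patch descriptors
    # pooled across CT scans. Descriptor, patch size, and k are hypothetical.
    import numpy as np
    from sklearn.cluster import KMeans

    def patch_descriptor(patch):
        """Toy texture descriptor: intensity histogram plus simple gradient stats."""
        hist, _ = np.histogram(patch, bins=16, range=(-1000.0, 0.0), density=True)
        gy, gx = np.gradient(patch.astype(float))
        return np.concatenate([hist, [gx.std(), gy.std()]])

    def learn_prototypes(scans, patch=32, k=10, seed=0):
        """Cluster descriptors of non-overlapping lung patches into k prototypes."""
        feats = []
        for scan in scans:                      # each scan: 2D array of HU values
            for i in range(0, scan.shape[0] - patch, patch):
                for j in range(0, scan.shape[1] - patch, patch):
                    feats.append(patch_descriptor(scan[i:i+patch, j:j+patch]))
        return KMeans(n_clusters=k, n_init=10, random_state=seed).fit(np.array(feats))

    # Usage with synthetic stand-ins for lung CT slices (HU roughly -1000 to -600):
    scans = [np.random.default_rng(s).uniform(-1000, -600, (128, 128)) for s in range(3)]
    km = learn_prototypes(scans)                # km.labels_ maps each patch to a prototype

Assigning every lung patch to its nearest cluster center is what turns the prototypes into an automated labeling of lung volumes, as the abstract describes; the prototypes can then be compared against the three standard radiological subtypes.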
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1004.5305v1-abstract-full').style.display = 'none'; document.getElementById('1004.5305v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 April, 2010; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2010. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">vol 35, pp 871-873</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" 
href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>
