Search | arXiv e-print repository

Showing 1–23 of 23 results for author: Wiest, R

Searching in archive cs (sorted by announcement date, newest first; 50 results per page).

1. arXiv:2408.11142 [pdf] (cs.CV)
   ISLES 2024: The first longitudinal multimodal multi-center real-world dataset in (sub-)acute stroke
   Authors: Evamaria O. Riedel, Ezequiel de la Rosa, The Anh Baran, Moritz Hernandez Petzsche, Hakim Baazaoui, Kaiyuan Yang, David Robben, Joaquin Oscar Seia, Roland Wiest, Mauricio Reyes, Ruisheng Su, Claus Zimmer, Tobias Boeckh-Behrens, Maria Berndt, Bjoern Menze, Benedikt Wiestler, Susanne Wegener, Jan S. Kirschke
   Abstract: Stroke remains a leading cause of global morbidity and mortality, placing a heavy socioeconomic burden. Over the past decade, advances in endovascular reperfusion therapy and the use of CT and MRI imaging for treatment guidance have significantly improved patient outcomes and are now standard in clinical practice. To develop machine learning algorithms that can extract meaningful and reproducible models of brain function from stroke images for both clinical and research purposes - particularly for lesion identification, brain health quantification, and prognosis - large, diverse, and well-annotated public datasets are essential. While only a few datasets with (sub-)acute stroke data were previously available, several large, high-quality datasets have recently been made publicly accessible. However, these existing datasets include only MRI data. In contrast, our dataset is the first to offer comprehensive longitudinal stroke data, including acute CT imaging with angiography and perfusion, follow-up MRI at 2-9 days, as well as acute and longitudinal clinical data up to a three-month outcome. The dataset includes a training dataset of n=150 and a test dataset of n=100 scans. Training data is publicly available, while test data will be used exclusively for model validation. We are making this dataset available as part of the 2024 edition of the Ischemic Stroke Lesion Segmentation (ISLES) challenge (https://www.isles-challenge.org/), which continuously aims to establish benchmark methods for acute and sub-acute ischemic stroke lesion segmentation, aiding in creating open stroke imaging datasets and evaluating cutting-edge image processing algorithms.
   Submitted 20 August, 2024; originally announced August 2024.
2. arXiv:2408.10966 [pdf, other] (eess.IV, cs.CV)
   ISLES'24: Improving final infarct prediction in ischemic stroke using multimodal imaging and clinical data
   Authors: Ezequiel de la Rosa, Ruisheng Su, Mauricio Reyes, Roland Wiest, Evamaria O. Riedel, Florian Kofler, Kaiyuan Yang, Hakim Baazaoui, David Robben, Susanne Wegener, Jan S. Kirschke, Benedikt Wiestler, Bjoern Menze
   Abstract: Accurate estimation of core (irreversibly damaged tissue) and penumbra (salvageable tissue) volumes is essential for ischemic stroke treatment decisions. Perfusion CT, the clinical standard, estimates these volumes but is affected by variations in deconvolution algorithms, implementations, and thresholds. Core tissue expands over time, with growth rates influenced by thrombus location, collateral circulation, and inherent patient-specific factors. Understanding this tissue growth is crucial for determining the need to transfer patients to comprehensive stroke centers, predicting the benefits of additional reperfusion attempts during mechanical thrombectomy, and forecasting final clinical outcomes. This work presents the ISLES'24 challenge, which addresses final post-treatment stroke infarct prediction from pre-interventional acute stroke imaging and clinical data. ISLES'24 establishes a unique 360-degree setting where all feasibly accessible clinical data are available for participants, including full CT acute stroke imaging, sub-acute follow-up MRI, and clinical tabular data. The contributions of this work are two-fold: first, we introduce a standardized benchmarking of final stroke infarct segmentation algorithms through the ISLES'24 challenge; second, we provide insights into infarct segmentation using multimodal imaging and clinical data strategies by identifying outperforming methods on a finely curated dataset. The outputs of this challenge are anticipated to enhance clinical decision-making and improve patient outcome predictions. All ISLES'24 materials, including data, performance evaluation scripts, and leading algorithmic strategies, are available to the research community at https://isles-24.grand-challenge.org/.
   Submitted 20 August, 2024; originally announced August 2024.
3. arXiv:2403.19425 [pdf, ps, other] (eess.IV, cs.CV)
   A Robust Ensemble Algorithm for Ischemic Stroke Lesion Segmentation: Generalizability and Clinical Utility Beyond the ISLES Challenge
   Authors: Ezequiel de la Rosa, Mauricio Reyes, Sook-Lei Liew, Alexandre Hutton, Roland Wiest, Johannes Kaesmacher, Uta Hanning, Arsany Hakim, Richard Zubal, Waldo Valenzuela, David Robben, Diana M. Sima, Vincenzo Anania, Arne Brys, James A. Meakin, Anne Mickan, Gabriel Broocks, Christian Heitkamp, Shengbo Gao, Kongming Liang, Ziji Zhang, Md Mahfuzur Rahman Siddiquee, Andriy Myronenko, Pooya Ashtari, Sabine Van Huffel, et al. (33 additional authors not shown)
   Abstract: Diffusion-weighted MRI (DWI) is essential for stroke diagnosis, treatment decisions, and prognosis. However, image and disease variability hinder the development of generalizable AI algorithms with clinical value. We address this gap by presenting a novel ensemble algorithm derived from the 2022 Ischemic Stroke Lesion Segmentation (ISLES) challenge. ISLES'22 provided 400 patient scans with ischemic stroke from various medical centers, facilitating the development of a wide range of cutting-edge segmentation algorithms by the research community. Through collaboration with leading teams, we combined top-performing algorithms into an ensemble model that overcomes the limitations of individual solutions. Our ensemble model achieved superior ischemic lesion detection and segmentation accuracy on our internal test set compared to individual algorithms. This accuracy generalized well across diverse image and disease variables. Furthermore, the model excelled in extracting clinical biomarkers. Notably, in a Turing-like test, neuroradiologists consistently preferred the algorithm's segmentations over manual expert efforts, highlighting increased comprehensiveness and precision. Validation using a real-world external dataset (N=1686) confirmed the model's generalizability. The algorithm's outputs also demonstrated strong correlations with clinical scores (admission NIHSS and 90-day mRS) on par with or exceeding expert-derived results, underlining its clinical relevance. This study offers two key findings. First, we present an ensemble algorithm (https://github.com/Tabrisrei/ISLES22_Ensemble) that detects and segments ischemic stroke lesions on DWI across diverse scenarios on par with expert (neuro)radiologists. Second, we show the potential for biomedical challenge outputs to extend beyond the challenge's initial objectives, demonstrating their real-world clinical applicability.
   Submitted 3 April, 2024; v1 submitted 28 March, 2024; originally announced March 2024.
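The ensemble entry above fuses several independently trained segmentation models. As a purely illustrative sketch (the abstract does not specify the authors' actual fusion rule), a minimal majority vote over binary lesion masks, assuming equally shaped numpy arrays, could look like this:

```python
import numpy as np

def majority_vote(masks):
    """Fuse equally shaped binary masks by keeping voxels that more
    than half of the models predicted as lesion."""
    stacked = np.stack(masks, axis=0)        # (n_models, *volume_shape)
    votes = stacked.sum(axis=0)              # per-voxel vote count
    return (votes > len(masks) / 2).astype(np.uint8)

# Toy 2D example with three hypothetical model outputs
a = np.array([[1, 0], [1, 1]])
b = np.array([[1, 0], [0, 1]])
c = np.array([[0, 0], [1, 1]])
print(majority_vote([a, b, c]))              # [[1 0]
                                             #  [1 1]]
```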
4. arXiv:2312.17670 [pdf, other] (cs.CV, cs.LG, q-bio.QM, q-bio.TO)
   Benchmarking the CoW with the TopCoW Challenge: Topology-Aware Anatomical Segmentation of the Circle of Willis for CTA and MRA
   Authors: Kaiyuan Yang, Fabio Musio, Yihui Ma, Norman Juchler, Johannes C. Paetzold, Rami Al-Maskari, Luciano Höher, Hongwei Bran Li, Ibrahim Ethem Hamamci, Anjany Sekuboyina, Suprosanna Shit, Houjing Huang, Chinmay Prabhakar, Ezequiel de la Rosa, Diana Waldmannstetter, Florian Kofler, Fernando Navarro, Martin Menten, Ivan Ezhov, Daniel Rueckert, Iris Vos, Ynte Ruigrok, Birgitta Velthuis, Hugo Kuijf, Julien Hämmerli, et al. (59 additional authors not shown)
   Abstract: The Circle of Willis (CoW) is an important network of arteries connecting major circulations of the brain. Its vascular architecture is believed to affect the risk, severity, and clinical outcome of serious neuro-vascular diseases. However, characterizing the highly variable CoW anatomy is still a manual and time-consuming expert task. The CoW is usually imaged by two angiographic imaging modalities, magnetic resonance angiography (MRA) and computed tomography angiography (CTA), but there exist limited public datasets with annotations on CoW anatomy, especially for CTA. Therefore, we organized the TopCoW Challenge in 2023 with the release of an annotated CoW dataset. The TopCoW dataset was the first public dataset with voxel-level annotations for thirteen possible CoW vessel components, enabled by virtual-reality (VR) technology. It was also the first large dataset with paired MRA and CTA from the same patients. The TopCoW challenge formalized the CoW characterization problem as a multiclass anatomical segmentation task with an emphasis on topological metrics. We invited submissions worldwide for the CoW segmentation task, which attracted over 140 registered participants from four continents. The top-performing teams managed to segment many CoW components to Dice scores around 90%, but with lower scores for communicating arteries and rare variants. There were also topological mistakes for predictions with high Dice scores. Additional topological analysis revealed further areas for improvement in detecting certain CoW components and matching CoW variant topology accurately. TopCoW represented a first attempt at benchmarking the CoW anatomical segmentation task for MRA and CTA, both morphologically and topologically.
   Submitted 29 April, 2024; v1 submitted 29 December, 2023; originally announced December 2023.
   Comments: 24 pages, 11 figures, 9 tables. Summary Paper for the MICCAI TopCoW 2023 Challenge
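For reference, the Dice score quoted above measures volumetric overlap between a predicted and a reference mask. A minimal sketch for the binary case, with toy inputs (TopCoW itself scores thirteen vessel classes and adds topology-aware metrics on top):

```python
import numpy as np

def dice(pred, gt, eps=1e-8):
    """Dice similarity coefficient between two binary masks (1.0 = perfect overlap)."""
    pred, gt = pred.astype(bool), gt.astype(bool)
    intersection = np.logical_and(pred, gt).sum()
    return 2.0 * intersection / (pred.sum() + gt.sum() + eps)

pred = np.array([1, 1, 0, 1])   # hypothetical predicted vessel voxels
gt   = np.array([1, 0, 0, 1])   # hypothetical reference annotation
print(round(dice(pred, gt), 3)) # 0.8 = 2*2 / (3 + 2)
```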
5. arXiv:2305.09011 [pdf, other] (eess.IV, cs.CV)
   The Brain Tumor Segmentation (BraTS) Challenge 2023: Brain MR Image Synthesis for Tumor Segmentation (BraSyn)
   Authors: Hongwei Bran Li, Gian Marco Conte, Qingqiao Hu, Syed Muhammad Anwar, Florian Kofler, Ivan Ezhov, Koen van Leemput, Marie Piraud, Maria Diaz, Byrone Cole, Evan Calabrese, Jeff Rudie, Felix Meissen, Maruf Adewole, Anastasia Janas, Anahita Fathi Kazerooni, Dominic LaBella, Ahmed W. Moawad, Keyvan Farahani, James Eddy, Timothy Bergquist, Verena Chung, Russell Takeshi Shinohara, Farouk Dako, Walter Wiggins, et al. (44 additional authors not shown)
   Abstract: Automated brain tumor segmentation methods have become well-established and reached performance levels offering clear clinical utility. These methods typically rely on four input magnetic resonance imaging (MRI) modalities: T1-weighted images with and without contrast enhancement, T2-weighted images, and FLAIR images. However, some sequences are often missing in clinical practice due to time constraints or image artifacts, such as patient motion. Consequently, the ability to substitute missing modalities and gain segmentation performance is highly desirable and necessary for the broader adoption of these algorithms in the clinical routine. In this work, we present the establishment of the Brain MR Image Synthesis Benchmark (BraSyn) in conjunction with the Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2023. The primary objective of this challenge is to evaluate image synthesis methods that can realistically generate missing MRI modalities when multiple available images are provided. The ultimate aim is to facilitate automated brain tumor segmentation pipelines. The image dataset used in the benchmark is diverse and multi-modal, created through collaboration with various hospitals and research institutions.
   Submitted 24 November, 2024; v1 submitted 15 May, 2023; originally announced May 2023.
   Comments: Technical report of BraSyn
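Synthesis entries like BraSyn are typically scored by comparing the generated modality against the held-out real acquisition. As a hedged example (the abstract does not name the challenge's official metric set), a structural-similarity check with scikit-image on stand-in data might look like this:

```python
import numpy as np
from skimage.metrics import structural_similarity

# Stand-ins for a real acquired slice and a synthesized replacement
rng = np.random.default_rng(0)
real = rng.random((128, 128)).astype(np.float32)
fake = np.clip(real + 0.05 * rng.standard_normal((128, 128)).astype(np.float32), 0, 1)

# SSIM near 1.0 indicates the synthesized slice closely matches the real one
score = structural_similarity(real, fake, data_range=1.0)
print(f"SSIM: {score:.3f}")
```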
6. arXiv:2305.08992 [pdf, other] (eess.IV, cs.CV, cs.LG)
   The Brain Tumor Segmentation (BraTS) Challenge: Local Synthesis of Healthy Brain Tissue via Inpainting
   Authors: Florian Kofler, Felix Meissen, Felix Steinbauer, Robert Graf, Stefan K Ehrlich, Annika Reinke, Eva Oswald, Diana Waldmannstetter, Florian Hoelzl, Izabela Horvath, Oezguen Turgut, Suprosanna Shit, Christina Bukas, Kaiyuan Yang, Johannes C. Paetzold, Ezequiel de la Rosa, Isra Mekki, Shankeeth Vinayahalingam, Hasan Kassem, Juexin Zhang, Ke Chen, Ying Weng, Alicia Durrer, Philippe C. Cattin, Julia Wolleb, et al. (81 additional authors not shown)
   Abstract: A myriad of algorithms for the automatic analysis of brain MR images is available to support clinicians in their decision-making. For brain tumor patients, the image acquisition time series typically starts with an already pathological scan. This poses problems, as many algorithms are designed to analyze healthy brains and provide no guarantee for images featuring lesions. Examples include, but are not limited to, algorithms for brain anatomy parcellation, tissue segmentation, and brain extraction. To solve this dilemma, we introduce the BraTS inpainting challenge. Here, the participants explore inpainting techniques to synthesize healthy brain scans from lesioned ones. The following manuscript contains the task formulation, dataset, and submission procedure. Later, it will be updated to summarize the findings of the challenge. The challenge is organized as part of the ASNR-BraTS MICCAI challenge.
   Submitted 22 September, 2024; v1 submitted 15 May, 2023; originally announced May 2023.
   Comments: 14 pages, 6 figures
7. arXiv:2206.06694 [pdf, other] (cs.CV) doi:10.1038/s41597-022-01875-5
   ISLES 2022: A multi-center magnetic resonance imaging stroke lesion segmentation dataset
   Authors: Moritz Roman Hernandez Petzsche, Ezequiel de la Rosa, Uta Hanning, Roland Wiest, Waldo Enrique Valenzuela Pinilla, Mauricio Reyes, Maria Ines Meyer, Sook-Lei Liew, Florian Kofler, Ivan Ezhov, David Robben, Alexander Hutton, Tassilo Friedrich, Teresa Zarth, Johannes Bürkle, The Anh Baran, Bjoern Menze, Gabriel Broocks, Lukas Meyer, Claus Zimmer, Tobias Boeckh-Behrens, Maria Berndt, Benno Ikenberg, Benedikt Wiestler, Jan S. Kirschke
   Abstract: Magnetic resonance imaging (MRI) is a central modality for stroke imaging. It is used upon patient admission to make treatment decisions such as selecting patients for intravenous thrombolysis or endovascular therapy. MRI is later used in the duration of hospital stay to predict outcome by visualizing infarct core size and location. Furthermore, it may be used to characterize stroke etiology, e.g. differentiation between (cardio)-embolic and non-embolic stroke. Computer-based automated medical image processing is increasingly finding its way into clinical routine. Previous iterations of the Ischemic Stroke Lesion Segmentation (ISLES) challenge have aided in identifying benchmark methods for acute and sub-acute ischemic stroke lesion segmentation. Here we introduce an expert-annotated, multicenter MRI dataset for segmentation of acute to subacute stroke lesions. This dataset comprises 400 multi-vendor MRI cases with high variability in stroke lesion size, quantity and location. It is split into a training dataset of n=250 and a test dataset of n=150. All training data will be made publicly available. The test dataset will be used for model validation only and will not be released to the public. This dataset serves as the foundation of the ISLES 2022 challenge with the goal of finding algorithmic methods to enable the development and benchmarking of robust and accurate segmentation algorithms for ischemic stroke.
   Submitted 14 June, 2022; originally announced June 2022.
   Comments: 12 pages, 2 figures
   Journal ref: Scientific Data 9.1 (2022): 762
8. arXiv:2204.10836 [pdf, other] (cs.LG, eess.IV) doi:10.1038/s41467-022-33407-5
   Federated Learning Enables Big Data for Rare Cancer Boundary Detection
   Authors: Sarthak Pati, Ujjwal Baid, Brandon Edwards, Micah Sheller, Shih-Han Wang, G Anthony Reina, Patrick Foley, Alexey Gruzdev, Deepthi Karkada, Christos Davatzikos, Chiharu Sako, Satyam Ghodasara, Michel Bilello, Suyash Mohan, Philipp Vollmuth, Gianluca Brugnara, Chandrakanth J Preetha, Felix Sahm, Klaus Maier-Hein, Maximilian Zenk, Martin Bendszus, Wolfgang Wick, Evan Calabrese, Jeffrey Rudie, Javier Villanueva-Meyer, et al. (254 additional authors not shown)
   Abstract: Although machine learning (ML) has shown promise in numerous domains, there are concerns about generalizability to out-of-sample data. This is currently addressed by centrally sharing ample, and importantly diverse, data from multiple sites. However, such centralization is challenging to scale (or even infeasible) due to various limitations. Federated ML (FL) provides an alternative to train accurate and generalizable ML models, by only sharing numerical model updates. Here we present findings from the largest FL study to date, involving data from 71 healthcare institutions across 6 continents, to generate an automatic tumor boundary detector for the rare disease of glioblastoma, utilizing the largest dataset of such patients ever used in the literature (25,256 MRI scans from 6,314 patients). We demonstrate a 33% improvement over a publicly trained model to delineate the surgically targetable tumor, and 23% improvement over the tumor's entire extent. We anticipate our study to: 1) enable more studies in healthcare informed by large and diverse data, ensuring meaningful results for rare diseases and underrepresented populations, 2) facilitate further quantitative analyses for glioblastoma via performance optimization of our consensus model for eventual public release, and 3) demonstrate the effectiveness of FL at such scale and task complexity as a paradigm shift for multi-site collaborations, alleviating the need for data sharing.
   Submitted 25 April, 2022; v1 submitted 22 April, 2022; originally announced April 2022.
   Comments: federated learning, deep learning, convolutional neural network, segmentation, brain tumor, glioma, glioblastoma, FeTS, BraTS
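The federated setup described above shares only numerical model updates between sites. As a generic, hedged sketch (the study's actual aggregation protocol is not given in the abstract), federated averaging weights each site's parameters by its local dataset size:

```python
import numpy as np

def fedavg(site_params, site_sizes):
    """Average per-site parameter dicts, weighted by local dataset size,
    so only numerical updates (never patient data) leave each site."""
    total = sum(site_sizes)
    return {name: sum(p[name] * (n / total)
                      for p, n in zip(site_params, site_sizes))
            for name in site_params[0]}

# Two toy "institutions" sharing a single-tensor model
w1 = {"conv.weight": np.array([1.0, 2.0])}
w2 = {"conv.weight": np.array([3.0, 4.0])}
print(fedavg([w1, w2], site_sizes=[100, 300]))  # {'conv.weight': array([2.5, 3.5])}
```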
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">federated learning, deep learning, convolutional neural network, segmentation, brain tumor, glioma, glioblastoma, FeTS, BraTS</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.10074">arXiv:2112.10074</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.10074">pdf</a>, <a href="https://arxiv.org/format/2112.10074">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.59275/j.melba.2022-354b">10.59275/j.melba.2022-354b <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> QU-BraTS: MICCAI BraTS 2020 Challenge on Quantifying Uncertainty in Brain Tumor Segmentation - Analysis of Ranking Scores and Benchmarking Results </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+R">Raghav Mehta</a>, <a href="/search/cs?searchtype=author&amp;query=Filos%2C+A">Angelos Filos</a>, <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Sako%2C+C">Chiharu Sako</a>, <a href="/search/cs?searchtype=author&amp;query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&amp;query=Rebsamen%2C+M">Michael Rebsamen</a>, <a href="/search/cs?searchtype=author&amp;query=Datwyler%2C+K">Katrin Datwyler</a>, <a href="/search/cs?searchtype=author&amp;query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&amp;query=Radojewski%2C+P">Piotr Radojewski</a>, <a href="/search/cs?searchtype=author&amp;query=Murugesan%2C+G+K">Gowtham Krishnan Murugesan</a>, <a href="/search/cs?searchtype=author&amp;query=Nalawade%2C+S">Sahil Nalawade</a>, <a href="/search/cs?searchtype=author&amp;query=Ganesh%2C+C">Chandan Ganesh</a>, <a href="/search/cs?searchtype=author&amp;query=Wagner%2C+B">Ben Wagner</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+F+F">Fang F. Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Fei%2C+B">Baowei Fei</a>, <a href="/search/cs?searchtype=author&amp;query=Madhuranthakam%2C+A+J">Ananth J. Madhuranthakam</a>, <a href="/search/cs?searchtype=author&amp;query=Maldjian%2C+J+A">Joseph A. 
Maldjian</a>, <a href="/search/cs?searchtype=author&amp;query=Daza%2C+L">Laura Daza</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+C">Catalina Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Arbelaez%2C+P">Pablo Arbelaez</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+C">Chengliang Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shuo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Reynaud%2C+H">Hadrien Reynaud</a>, <a href="/search/cs?searchtype=author&amp;query=Mo%2C+Y">Yuan-han Mo</a>, <a href="/search/cs?searchtype=author&amp;query=Angelini%2C+E">Elsa Angelini</a> , et al. (67 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2112.10074v2-abstract-full" style="display: inline;"> Deep learning (DL) models have provided state-of-the-art performance in various medical imaging benchmarking challenges, including the Brain Tumor Segmentation (BraTS) challenges. However, the task of focal pathology multi-compartment segmentation (e.g., tumor and lesion sub-regions) is particularly challenging, and potential errors hinder translating DL models into clinical workflows. Quantifying the reliability of DL model predictions in the form of uncertainties could enable clinical review of the most uncertain regions, thereby building trust and paving the way toward clinical translation. Several uncertainty estimation methods have recently been introduced for DL medical image segmentation tasks. Developing scores to evaluate and compare the performance of uncertainty measures will assist the end-user in making more informed decisions. In this study, we explore and evaluate a score developed during the BraTS 2019 and BraTS 2020 task on uncertainty quantification (QU-BraTS) and designed to assess and rank uncertainty estimates for brain tumor multi-compartment segmentation. This score (1) rewards uncertainty estimates that produce high confidence in correct assertions and those that assign low confidence levels at incorrect assertions, and (2) penalizes uncertainty measures that lead to a higher percentage of under-confident correct assertions. We further benchmark the segmentation uncertainties generated by 14 independent participating teams of QU-BraTS 2020, all of which also participated in the main BraTS segmentation task. Overall, our findings confirm the importance and complementary value that uncertainty estimates provide to segmentation algorithms, highlighting the need for uncertainty quantification in medical image analyses.
Finally, in favor of transparency and reproducibility, our evaluation code is made publicly available at: https://github.com/RagMeh11/QU-BraTS. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at the Journal of Machine Learning for Biomedical Imaging (MELBA): https://www.melba-journal.org/papers/2022:026.html</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine Learning for Biomedical Imaging 1 (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.02314">arXiv:2107.02314</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.02314">pdf</a>, <a href="https://arxiv.org/format/2107.02314">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> The RSNA-ASNR-MICCAI BraTS 2021 Benchmark on Brain Tumor Segmentation and Radiogenomic Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Ghodasara%2C+S">Satyam Ghodasara</a>, <a href="/search/cs?searchtype=author&amp;query=Mohan%2C+S">Suyash Mohan</a>, <a href="/search/cs?searchtype=author&amp;query=Bilello%2C+M">Michel Bilello</a>, <a href="/search/cs?searchtype=author&amp;query=Calabrese%2C+E">Evan Calabrese</a>, <a href="/search/cs?searchtype=author&amp;query=Colak%2C+E">Errol Colak</a>, <a href="/search/cs?searchtype=author&amp;query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/cs?searchtype=author&amp;query=Kalpathy-Cramer%2C+J">Jayashree Kalpathy-Cramer</a>, <a href="/search/cs?searchtype=author&amp;query=Kitamura%2C+F+C">Felipe C. Kitamura</a>, <a href="/search/cs?searchtype=author&amp;query=Pati%2C+S">Sarthak Pati</a>, <a href="/search/cs?searchtype=author&amp;query=Prevedello%2C+L+M">Luciano M. Prevedello</a>, <a href="/search/cs?searchtype=author&amp;query=Rudie%2C+J+D">Jeffrey D. Rudie</a>, <a href="/search/cs?searchtype=author&amp;query=Sako%2C+C">Chiharu Sako</a>, <a href="/search/cs?searchtype=author&amp;query=Shinohara%2C+R+T">Russell T.
Shinohara</a>, <a href="/search/cs?searchtype=author&amp;query=Bergquist%2C+T">Timothy Bergquist</a>, <a href="/search/cs?searchtype=author&amp;query=Chai%2C+R">Rong Chai</a>, <a href="/search/cs?searchtype=author&amp;query=Eddy%2C+J">James Eddy</a>, <a href="/search/cs?searchtype=author&amp;query=Elliott%2C+J">Julia Elliott</a>, <a href="/search/cs?searchtype=author&amp;query=Reade%2C+W">Walter Reade</a>, <a href="/search/cs?searchtype=author&amp;query=Schaffter%2C+T">Thomas Schaffter</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+T">Thomas Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+J">Jiaxin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Moawad%2C+A+W">Ahmed W. Moawad</a>, <a href="/search/cs?searchtype=author&amp;query=Coelho%2C+L+O">Luiz Otavio Coelho</a>, <a href="/search/cs?searchtype=author&amp;query=McDonnell%2C+O">Olivia McDonnell</a> , et al. (78 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2107.02314v2-abstract-full" style="display: inline;"> The BraTS 2021 challenge celebrates its 10th anniversary and is jointly organized by the Radiological Society of North America (RSNA), the American Society of Neuroradiology (ASNR), and the Medical Image Computing and Computer Assisted Interventions (MICCAI) society. Since its inception, BraTS has been focusing on being a common benchmarking venue for brain glioma segmentation algorithms, with well-curated multi-institutional multi-parametric magnetic resonance imaging (mpMRI) data. Gliomas are the most common primary malignancies of the central nervous system, with varying degrees of aggressiveness and prognosis. The RSNA-ASNR-MICCAI BraTS 2021 challenge targets the evaluation of computational algorithms assessing the same tumor compartmentalization, as well as the underlying tumor&#39;s molecular characterization, in pre-operative baseline mpMRI data from 2,040 patients. Specifically, the two tasks that BraTS 2021 focuses on are: a) the segmentation of the histologically distinct brain tumor sub-regions, and b) the classification of the tumor&#39;s O[6]-methylguanine-DNA methyltransferase (MGMT) promoter methylation status. The performance evaluation of all participating algorithms in BraTS 2021 will be conducted through the Sage Bionetworks Synapse platform (Task 1) and Kaggle (Task 2), concluding in distributing to the top ranked participants monetary awards of $60,000 collectively.
</span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages, 2 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.05874">arXiv:2105.05874</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2105.05874">pdf</a>, <a href="https://arxiv.org/format/2105.05874">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> The Federated Tumor Segmentation (FeTS) Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pati%2C+S">Sarthak Pati</a>, <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Zenk%2C+M">Maximilian Zenk</a>, <a href="/search/cs?searchtype=author&amp;query=Edwards%2C+B">Brandon Edwards</a>, <a href="/search/cs?searchtype=author&amp;query=Sheller%2C+M">Micah Sheller</a>, <a href="/search/cs?searchtype=author&amp;query=Reina%2C+G+A">G. Anthony Reina</a>, <a href="/search/cs?searchtype=author&amp;query=Foley%2C+P">Patrick Foley</a>, <a href="/search/cs?searchtype=author&amp;query=Gruzdev%2C+A">Alexey Gruzdev</a>, <a href="/search/cs?searchtype=author&amp;query=Martin%2C+J">Jason Martin</a>, <a href="/search/cs?searchtype=author&amp;query=Albarqouni%2C+S">Shadi Albarqouni</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yong Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Shinohara%2C+R+T">Russell Taki Shinohara</a>, <a href="/search/cs?searchtype=author&amp;query=Reinke%2C+A">Annika Reinke</a>, <a href="/search/cs?searchtype=author&amp;query=Zimmerer%2C+D">David Zimmerer</a>, <a href="/search/cs?searchtype=author&amp;query=Freymann%2C+J+B">John B. Freymann</a>, <a href="/search/cs?searchtype=author&amp;query=Kirby%2C+J+S">Justin S. Kirby</a>, <a href="/search/cs?searchtype=author&amp;query=Davatzikos%2C+C">Christos Davatzikos</a>, <a href="/search/cs?searchtype=author&amp;query=Colen%2C+R+R">Rivka R.
Colen</a>, <a href="/search/cs?searchtype=author&amp;query=Kotrotsou%2C+A">Aikaterini Kotrotsou</a>, <a href="/search/cs?searchtype=author&amp;query=Marcus%2C+D">Daniel Marcus</a>, <a href="/search/cs?searchtype=author&amp;query=Milchenko%2C+M">Mikhail Milchenko</a>, <a href="/search/cs?searchtype=author&amp;query=Nazeri%2C+A">Arash Nazeri</a>, <a href="/search/cs?searchtype=author&amp;query=Fathallah-Shaykh%2C+H">Hassan Fathallah-Shaykh</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Jakab%2C+A">Andras Jakab</a> , et al. (7 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2105.05874v2-abstract-full" style="display: inline;"> This manuscript describes the first challenge on Federated Learning, namely the Federated Tumor Segmentation (FeTS) challenge 2021. International challenges have become the standard for validation of biomedical image analysis methods. However, the actual performance of participating (even the winning) algorithms on &#34;real-world&#34; clinical data often remains unclear, as the data included in challenges are usually acquired in very controlled settings at few institutions. The seemingly obvious solution of just collecting increasingly more data from more institutions in such challenges does not scale well due to privacy and ownership hurdles. Towards alleviating these concerns, we are proposing the FeTS challenge 2021 to cater towards both the development and the evaluation of models for the segmentation of intrinsically heterogeneous (in appearance, shape, and histology) brain tumors, namely gliomas. Specifically, the FeTS 2021 challenge uses clinically acquired, multi-institutional magnetic resonance imaging (MRI) scans from the BraTS 2020 challenge, as well as from various remote independent institutions included in the collaborative network of a real-world federation (https://www.fets.ai/). The goals of the FeTS challenge are directly represented by the two included tasks: 1) the identification of the optimal weight aggregation approach towards the training of a consensus model that has gained knowledge via federated learning from multiple geographically distinct institutions, while their data are always retained within each institution, and 2) the federated evaluation of the generalizability of brain tumor segmentation models &#34;in the wild&#34;, i.e. on data from institutional distributions that were not part of the training datasets.
</span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.00489">arXiv:2101.00489</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.00489">pdf</a>, <a href="https://arxiv.org/format/2101.00489">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.media.2020.101888">10.1016/j.media.2020.101888 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Combining unsupervised and supervised learning for predicting the final stroke lesion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pinto%2C+A">Adriano Pinto</a>, <a href="/search/cs?searchtype=author&amp;query=Pereira%2C+S">Sérgio Pereira</a>, <a href="/search/cs?searchtype=author&amp;query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Alves%2C+V">Victor Alves</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&amp;query=Silva%2C+C+A">Carlos A. Silva</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2101.00489v1-abstract-full" style="display: inline;"> Predicting the final ischaemic stroke lesion provides crucial information regarding the volume of salvageable hypoperfused tissue, which helps physicians in the difficult decision-making process of treatment planning and intervention.
Treatment selection is influenced by clinical diagnosis, which requires delineating the stroke lesion, as well as characterising cerebral blood flow dynamics using neuroimaging acquisitions. Nonetheless, predicting the final stroke lesion is an intricate task, due to the variability in lesion size, shape, location and the underlying cerebral haemodynamic processes that occur after the ischaemic stroke takes place. Moreover, since elapsed time between stroke and treatment is related to the loss of brain tissue, assessing and predicting the final stroke lesion needs to be performed in a short period of time, which makes the task even more complex. Therefore, there is a need for automatic methods that predict the final stroke lesion and support physicians in the treatment decision process. We propose a fully automatic deep learning method based on unsupervised and supervised learning to predict the final stroke lesion after 90 days. Our aim is to predict the final stroke lesion location and extent, taking into account the underlying cerebral blood flow dynamics that can influence the prediction. To achieve this, we propose a two-branch Restricted Boltzmann Machine, which provides specialized data-driven features from different sets of standard parametric Magnetic Resonance Imaging maps. These data-driven feature maps are then combined with the parametric Magnetic Resonance Imaging maps, and fed to a Convolutional and Recurrent Neural Network architecture. We evaluated our proposal on the publicly available ISLES 2017 testing dataset, reaching a Dice score of 0.38, Hausdorff Distance of 29.21 mm, and Average Symmetric Surface Distance of 5.52 mm. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at Medical Image Analysis (MedIA)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.06436">arXiv:2012.06436</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.06436">pdf</a>, <a href="https://arxiv.org/format/2012.06436">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Uncertainty-driven refinement of tumor-core segmentation using 3D-to-2D networks with label uncertainty </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&amp;query=Rebsamen%2C+M">Michael Rebsamen</a>, <a href="/search/cs?searchtype=author&amp;query=Daetwyler%2C+K">Katrin Daetwyler</a>, <a href="/search/cs?searchtype=author&amp;query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&amp;query=Radojewski%2C+P">Piotr Radojewski</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2012.06436v1-abstract-full" style="display: inline;"> The BraTS dataset contains a mixture of high-grade and low-grade gliomas, which have a rather different appearance: previous studies have shown that performance can be improved by separated training on low-grade gliomas (LGGs) and high-grade gliomas (HGGs), but in practice this information is not available at test time to decide which model to use. By contrast with HGGs, LGGs often present no sharp boundary between the tumor core and the surrounding edema, but rather a gradual reduction of tumor-cell density. Utilizing our 3D-to-2D fully convolutional architecture, DeepSCAN, which ranked highly in the 2019 BraTS challenge and was trained using an uncertainty-aware loss, we separate cases into those with a confidently segmented core, and those with a vaguely segmented or missing core. Since by assumption every tumor has a core, we reduce the threshold for classification of core tissue in those cases where the core, as segmented by the classifier, is vaguely defined or missing.
We then predict survival of high-grade glioma patients using a fusion of linear regression and random forest classification, based on age, number of distinct tumor components, and number of distinct tumor cores. We present results on the validation dataset of the Multimodal Brain Tumor Segmentation Challenge 2020 (segmentation and uncertainty challenge), and on the testing set, where the method achieved 4th place in segmentation, 1st place in uncertainty estimation, and 1st place in survival prediction. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented (virtually) at the MICCAI BrainLes workshop 2020. Accepted for publication in the BrainLes proceedings</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.03041">arXiv:1904.03041</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1904.03041">pdf</a>, <a href="https://arxiv.org/format/1904.03041">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Automatic detection of lesion load change in Multiple Sclerosis using convolutional neural networks with segmentation confidence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&amp;query=Grunder%2C+L">Lorenz Grunder</a>, <a href="/search/cs?searchtype=author&amp;query=Wepfer%2C+R">Rik Wepfer</a>, <a href="/search/cs?searchtype=author&amp;query=Aschwanden%2C+F">Fabian Aschwanden</a>, <a href="/search/cs?searchtype=author&amp;query=Fischer%2C+T">Tim Fischer</a>, <a href="/search/cs?searchtype=author&amp;query=Friedli%2C+C">Christoph Friedli</a>, <a href="/search/cs?searchtype=author&amp;query=Muri%2C+R">Raphaela Muri</a>, <a href="/search/cs?searchtype=author&amp;query=Rummel%2C+C">Christian Rummel</a>, <a href="/search/cs?searchtype=author&amp;query=Verma%2C+R">Rajeev Verma</a>, <a href="/search/cs?searchtype=author&amp;query=Weisstanner%2C+C">Christian Weisstanner</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&amp;query=Salmen%2C+A">Anke Salmen</a>, <a href="/search/cs?searchtype=author&amp;query=Chan%2C+A">Andrew Chan</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Wagner%2C+F">Franca Wagner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>:
<span class="abstract-full has-text-grey-dark mathjax" id="1904.03041v1-abstract-full" style="display: inline;"> The detection of new or enlarged white-matter lesions in multiple sclerosis is a vital task in the monitoring of patients undergoing disease-modifying treatment for multiple sclerosis. However, the definition of &#39;new or enlarged&#39; is not fixed, and it is known that lesion-counting is highly subjective, with a high degree of inter- and intra-rater variability. Automated methods for lesion quantification hold the potential to make the detection of new and enlarged lesions consistent and repeatable. However, the majority of lesion segmentation algorithms are not evaluated for their ability to separate progressive from stable patients, despite this being a pressing clinical use-case. In this paper we show that change in volumetric measurements of lesion load alone is not a good method for performing this separation, even for highly performing segmentation methods. Instead, we propose a method for identifying lesion changes of high certainty, and establish on a dataset of longitudinal multiple sclerosis cases that this method is able to separate progressive from stable timepoints with a very high level of discrimination (AUC = 0.99), while changes in lesion volume are much less able to perform this separation (AUC = 0.71). Validation of the method on a second external dataset confirms that the method is able to generalize beyond the setting in which it was trained, achieving an accuracy of 83% in separating stable and progressive timepoints. Both lesion volume and count have previously been shown to be strong predictors of disease course across a population. However, we demonstrate that for individual patients, changes in these measures are not an adequate means of establishing no evidence of disease activity. Meanwhile, directly detecting tissue which changes, with high confidence, from non-lesion to lesion is a feasible methodology for identifying radiologically active patients. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.02436">arXiv:1904.02436</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1904.02436">pdf</a>, <a href="https://arxiv.org/format/1904.02436">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Few-shot brain segmentation from weakly labeled data with deep heteroscedastic multi-task networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&amp;query=Rebsamen%2C+M">Michael Rebsamen</a>, <a href="/search/cs?searchtype=author&amp;query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&amp;query=Rummel%2C+C">Christian Rummel</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="1904.02436v1-abstract-full" style="display: inline;"> In applications of supervised learning applied to medical image segmentation, the need for large amounts of labeled data typically goes unquestioned. In particular, in the case of brain anatomy segmentation, hundreds or thousands of weakly-labeled volumes are often used as training data. In this paper, we first observe that for many brain structures, a small number of training examples (n=9), weakly labeled using Freesurfer 6.0, plus simple data augmentation, suffice as training data to achieve high performance, achieving an overall mean Dice coefficient of $0.84 \pm 0.12$ compared to Freesurfer over 28 brain structures in T1-weighted images of $\approx 4000$ 9-10 year-olds from the Adolescent Brain Cognitive Development study. We then examine two varieties of heteroscedastic network as a method for improving classification results. An existing proposal by Kendall and Gal, which uses Monte-Carlo inference to learn to predict the variance of each prediction, yields an overall mean Dice of $0.85 \pm 0.14$ and showed statistically significant improvements over 25 brain structures.
Meanwhile a novel heteroscedastic network which directly learns the probability that an example has been mislabeled yielded an overall mean Dice of $0.87 \pm 0.11$ and showed statistically significant improvements over all but one of the brain structures considered. The loss function associated to this network can be interpreted as performing a form of learned label smoothing, where labels are only smoothed where they are judged to be uncertain. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.00682">arXiv:1904.00682</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1904.00682">pdf</a>, <a href="https://arxiv.org/format/1904.00682">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TMI.2019.2905770">10.1109/TMI.2019.2905770 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Standardized Assessment of Automatic Segmentation of White Matter Hyperintensities and Results of the WMH Segmentation Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kuijf%2C+H+J">Hugo J. Kuijf</a>, <a href="/search/cs?searchtype=author&amp;query=Biesbroek%2C+J+M">J. Matthijs Biesbroek</a>, <a href="/search/cs?searchtype=author&amp;query=de+Bresser%2C+J">Jeroen de Bresser</a>, <a href="/search/cs?searchtype=author&amp;query=Heinen%2C+R">Rutger Heinen</a>, <a href="/search/cs?searchtype=author&amp;query=Andermatt%2C+S">Simon Andermatt</a>, <a href="/search/cs?searchtype=author&amp;query=Bento%2C+M">Mariana Bento</a>, <a href="/search/cs?searchtype=author&amp;query=Berseth%2C+M">Matt Berseth</a>, <a href="/search/cs?searchtype=author&amp;query=Belyaev%2C+M">Mikhail Belyaev</a>, <a href="/search/cs?searchtype=author&amp;query=Cardoso%2C+M+J">M. Jorge Cardoso</a>, <a href="/search/cs?searchtype=author&amp;query=Casamitjana%2C+A">Adrià Casamitjana</a>, <a href="/search/cs?searchtype=author&amp;query=Collins%2C+D+L">D.
Louis Collins</a>, <a href="/search/cs?searchtype=author&amp;query=Dadar%2C+M">Mahsa Dadar</a>, <a href="/search/cs?searchtype=author&amp;query=Georgiou%2C+A">Achilleas Georgiou</a>, <a href="/search/cs?searchtype=author&amp;query=Ghafoorian%2C+M">Mohsen Ghafoorian</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+D">Dakai Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Khademi%2C+A">April Khademi</a>, <a href="/search/cs?searchtype=author&amp;query=Knight%2C+J">Jesse Knight</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H">Hongwei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Llad%C3%B3%2C+X">Xavier Lladó</a>, <a href="/search/cs?searchtype=author&amp;query=Luna%2C+M">Miguel Luna</a>, <a href="/search/cs?searchtype=author&amp;query=Mahmood%2C+Q">Qaiser Mahmood</a>, <a href="/search/cs?searchtype=author&amp;query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&amp;query=Mehrtash%2C+A">Alireza Mehrtash</a>, <a href="/search/cs?searchtype=author&amp;query=Ourselin%2C+S">Sébastien Ourselin</a>, <a href="/search/cs?searchtype=author&amp;query=Park%2C+B">Bo-yong Park</a> , et al. (19 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="1904.00682v1-abstract-full" style="display: inline;"> Quantification of cerebral white matter hyperintensities (WMH) of presumed vascular origin is of key importance in many neurological research studies. Currently, measurements are often still obtained from manual segmentations on brain MR images, which is a laborious procedure. Automatic WMH segmentation methods exist, but a standardized comparison of the performance of such methods is lacking. We organized a scientific challenge, in which developers could evaluate their method on a standardized multi-center/-scanner image dataset, giving an objective comparison: the WMH Segmentation Challenge (https://wmh.isi.uu.nl/). Sixty T1+FLAIR images from three MR scanners were released with manual WMH segmentations for training. A test set of 110 images from five MR scanners was used for evaluation. Segmentation methods had to be containerized and submitted to the challenge organizers. Five evaluation metrics were used to rank the methods: (1) Dice similarity coefficient, (2) modified Hausdorff distance (95th percentile), (3) absolute log-transformed volume difference, (4) sensitivity for detecting individual lesions, and (5) F1-score for individual lesions. Additionally, methods were ranked on their inter-scanner robustness. Twenty participants submitted their method for evaluation. This paper provides a detailed analysis of the results.
In brief, there is a cluster of four methods that rank significantly better than the other methods, with one clear winner. The inter-scanner robustness ranking shows that not all methods generalize to unseen scanners. The challenge remains open for future submissions and provides a public platform for method evaluation. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication in IEEE Transactions on Medical Imaging</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.07419">arXiv:1901.07419</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1901.07419">pdf</a>, <a href="https://arxiv.org/format/1901.07419">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Simultaneous lesion and neuroanatomy segmentation in Multiple Sclerosis using deep neural networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&amp;query=Wepfer%2C+R">Rik Wepfer</a>, <a href="/search/cs?searchtype=author&amp;query=Aschwanden%2C+F">Fabian Aschwanden</a>, <a href="/search/cs?searchtype=author&amp;query=Grunder%2C+L">Lorenz Grunder</a>, <a href="/search/cs?searchtype=author&amp;query=Muri%2C+R">Raphaela Muri</a>, <a href="/search/cs?searchtype=author&amp;query=Rummel%2C+C">Christian Rummel</a>, <a href="/search/cs?searchtype=author&amp;query=Verma%2C+R">Rajeev Verma</a>, <a href="/search/cs?searchtype=author&amp;query=Weisstanner%2C+C">Christian Weisstanner</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&amp;query=Salmen%2C+A">Anke Salmen</a>, <a href="/search/cs?searchtype=author&amp;query=Chan%2C+A">Andrew Chan</a>, <a href="/search/cs?searchtype=author&amp;query=Wagner%2C+F">Franca Wagner</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>:
<span class="abstract-full has-text-grey-dark mathjax" id="1901.07419v3-abstract-full" style="display: inline;"> Segmentation of white matter lesions and deep grey matter structures is an important task in the quantification of magnetic resonance imaging in multiple sclerosis. In this paper we explore segmentation solutions based on convolutional neural networks (CNNs) for providing fast, reliable segmentations of lesions and grey-matter structures in multi-modal MR imaging, and the performance of these methods when applied to out-of-centre data. We trained two state-of-the-art fully convolutional CNN architectures on the 2016 MSSEG training dataset, which was annotated by seven independent human raters: a reference implementation of a 3D Unet, and a more recently proposed 3D-to-2D architecture (DeepSCAN). We then retrained those methods on a larger dataset from a single centre, with and without labels for other brain structures. We quantified changes in performance owing to dataset shift, and changes in performance by adding the additional brain-structure labels. We also compared performance with freely available reference methods. Both fully-convolutional CNN methods substantially outperform other approaches in the literature when trained and evaluated in cross-validation on the MSSEG dataset, showing agreement with human raters in the range of human inter-rater variability. Both architectures showed drops in performance when trained on single-centre data and tested on the MSSEG dataset. When trained with the addition of weak anatomical labels derived from Freesurfer, the performance of the 3D Unet degraded, while the performance of the DeepSCAN net improved. Overall, the DeepSCAN network predicting both lesion and anatomical labels was the best-performing network examined. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Substantially revised version after comments from reviewers, including comparison to 3D Unet</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.04907">arXiv:1811.04907</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1811.04907">pdf</a>, <a href="https://arxiv.org/format/1811.04907">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning versus Classical Regression for Brain Tumor Patient Survival Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Suter%2C+Y">Yannick Suter</a>, <a href="/search/cs?searchtype=author&amp;query=Jungo%2C+A">Alain Jungo</a>, <a href="/search/cs?searchtype=author&amp;query=Rebsamen%2C+M">Michael Rebsamen</a>, <a href="/search/cs?searchtype=author&amp;query=Knecht%2C+U">Urspeter Knecht</a>, <a href="/search/cs?searchtype=author&amp;query=Herrmann%2C+E">Evelyn Herrmann</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="1811.04907v1-abstract-full" style="display: inline;"> Deep learning for regression tasks on medical imaging data has shown promising results. However, compared to other approaches, their power is strongly linked to the dataset size. In this study, we evaluate 3D-convolutional neural networks (CNNs) and classical regression methods with hand-crafted features for survival time regression of patients with high grade brain tumors. The tested CNNs for regression showed promising but unstable results. The best performing deep learning approach reached an accuracy of 51.5% on held-out samples of the training set. All tested deep learning experiments were outperformed by a Support Vector Classifier (SVC) using 30 radiomic features. The investigated features included intensity, shape, location and deep features.
The submitted method to the BraTS 2018 survival prediction challenge is an ensemble of SVCs, which reached a cross-validated accuracy of 72.2% on the BraTS 2018 training set, 57.1% on the validation set, and 42.9% on the testing set. The results suggest that more training data is necessary for a stable performance of a CNN model for direct regression from magnetic resonance images, and that non-imaging clinical patient information is crucial along with imaging information. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Contribution to The International Multimodal Brain Tumor Segmentation (BraTS) Challenge 2018, survival prediction task</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.02629">arXiv:1811.02629</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1811.02629">pdf</a>, <a href="https://arxiv.org/format/1811.02629">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Identifying the Best Machine Learning Algorithms for Brain Tumor Segmentation, Progression Assessment, and Overall Survival Prediction in the BRATS Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&amp;query=Jakab%2C+A">Andras Jakab</a>, <a href="/search/cs?searchtype=author&amp;query=Bauer%2C+S">Stefan Bauer</a>, <a href="/search/cs?searchtype=author&amp;query=Rempfler%2C+M">Markus Rempfler</a>, <a href="/search/cs?searchtype=author&amp;query=Crimi%2C+A">Alessandro Crimi</a>, <a href="/search/cs?searchtype=author&amp;query=Shinohara%2C+R+T">Russell Takeshi Shinohara</a>, <a href="/search/cs?searchtype=author&amp;query=Berger%2C+C">Christoph Berger</a>, <a href="/search/cs?searchtype=author&amp;query=Ha%2C+S+M">Sung Min Ha</a>, <a href="/search/cs?searchtype=author&amp;query=Rozycki%2C+M">Martin Rozycki</a>, <a href="/search/cs?searchtype=author&amp;query=Prastawa%2C+M">Marcel Prastawa</a>, <a href="/search/cs?searchtype=author&amp;query=Alberts%2C+E">Esther Alberts</a>, <a href="/search/cs?searchtype=author&amp;query=Lipkova%2C+J">Jana Lipkova</a>, <a href="/search/cs?searchtype=author&amp;query=Freymann%2C+J">John Freymann</a>, <a href="/search/cs?searchtype=author&amp;query=Kirby%2C+J">Justin Kirby</a>, <a
href="/search/cs?searchtype=author&amp;query=Bilello%2C+M">Michel Bilello</a>, <a href="/search/cs?searchtype=author&amp;query=Fathallah-Shaykh%2C+H">Hassan Fathallah-Shaykh</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J">Jan Kirschke</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Colen%2C+R">Rivka Colen</a>, <a href="/search/cs?searchtype=author&amp;query=Kotrotsou%2C+A">Aikaterini Kotrotsou</a>, <a href="/search/cs?searchtype=author&amp;query=Lamontagne%2C+P">Pamela Lamontagne</a>, <a href="/search/cs?searchtype=author&amp;query=Marcus%2C+D">Daniel Marcus</a>, <a href="/search/cs?searchtype=author&amp;query=Milchenko%2C+M">Mikhail Milchenko</a> , et al. (402 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.02629v3-abstract-short" style="display: inline;"> Gliomas are the most common primary brain malignancies, with different degrees of aggressiveness, variable prognosis and various heterogeneous histologic sub-regions, i.e., peritumoral edematous/invaded tissue, necrotic core, active and non-enhancing core. This intrinsic heterogeneity is also portrayed in their radio-phenotype, as their sub-regions are depicted by varying intensity profiles dissem&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.02629v3-abstract-full').style.display = 'inline'; document.getElementById('1811.02629v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.02629v3-abstract-full" style="display: none;"> Gliomas are the most common primary brain malignancies, with different degrees of aggressiveness, variable prognosis and various heterogeneous histologic sub-regions, i.e., peritumoral edematous/invaded tissue, necrotic core, active and non-enhancing core. This intrinsic heterogeneity is also portrayed in their radio-phenotype, as their sub-regions are depicted by varying intensity profiles disseminated across multi-parametric magnetic resonance imaging (mpMRI) scans, reflecting varying biological properties. Their heterogeneous shape, extent, and location are some of the factors that make these tumors difficult to resect, and in some cases inoperable. The amount of resected tumor is a factor also considered in longitudinal scans, when evaluating the apparent tumor for potential diagnosis of progression. Furthermore, there is mounting evidence that accurate segmentation of the various tumor sub-regions can offer the basis for quantitative image analysis towards prediction of patient overall survival. This study assesses the state-of-the-art machine learning (ML) methods used for brain tumor image analysis in mpMRI scans, during the last seven instances of the International Brain Tumor Segmentation (BraTS) challenge, i.e., 2012-2018. Specifically, we focus on i) evaluating segmentations of the various glioma sub-regions in pre-operative mpMRI scans, ii) assessing potential tumor progression by virtue of longitudinal growth of tumor sub-regions, beyond use of the RECIST/RANO criteria, and iii) predicting the overall survival from pre-operative mpMRI scans of patients that underwent gross total resection. 
Finally, we investigate the challenge of identifying the best ML algorithms for each of these tasks, considering that apart from being diverse on each instance of the challenge, the multi-institutional mpMRI BraTS dataset has also been a continuously evolving/growing dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.02629v3-abstract-full').style.display = 'none'; document.getElementById('1811.02629v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The International Multimodal Brain Tumor Segmentation (BraTS) Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.04413">arXiv:1806.04413</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1806.04413">pdf</a>, <a href="https://arxiv.org/format/1806.04413">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-00931-1_13">10.1007/978-3-030-00931-1_13 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Enhancing clinical MRI Perfusion maps with data-driven maps of complementary nature for lesion outcome prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pinto%2C+A">Adriano Pinto</a>, <a href="/search/cs?searchtype=author&amp;query=Pereira%2C+S">Sergio Pereira</a>, <a href="/search/cs?searchtype=author&amp;query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&amp;query=Alves%2C+V">Victor Alves</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Silva%2C+C+A">Carlos A. Silva</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.04413v1-abstract-short" style="display: inline;"> Stroke is the second most common cause of death in developed countries, where rapid clinical intervention can have a major impact on a patient&#39;s life. To perform the revascularization procedure, the decision making of physicians considers its risks and benefits based on multi-modal MRI and clinical experience. 
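Segmentation entries in BraTS-style evaluations are scored against expert annotations with overlap metrics such as the Dice coefficient. A minimal sketch of that metric, on toy masks rather than BraTS data:

```python
# Sketch: Dice overlap between a predicted and a reference binary mask,
# as commonly reported per tumor sub-region in segmentation challenges.
import numpy as np

def dice(pred: np.ndarray, truth: np.ndarray) -> float:
    """Dice = 2|A & B| / (|A| + |B|) for boolean masks."""
    pred, truth = pred.astype(bool), truth.astype(bool)
    denom = pred.sum() + truth.sum()
    if denom == 0:
        return 1.0  # convention: two empty masks agree perfectly
    return 2.0 * np.logical_and(pred, truth).sum() / denom

# Toy 3D masks standing in for one glioma sub-region (e.g., enhancing core).
rng = np.random.default_rng(1)
truth = rng.random((32, 32, 32)) > 0.8
pred = np.logical_xor(truth, rng.random((32, 32, 32)) > 0.97)  # noisy copy
print(f"Dice: {dice(pred, truth):.3f}")
```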
arXiv:1806.04413 [pdf, other] cs.CV, doi: 10.1007/978-3-030-00931-1_13
Enhancing clinical MRI Perfusion maps with data-driven maps of complementary nature for lesion outcome prediction
Authors: Adriano Pinto, Sergio Pereira, Raphael Meier, Victor Alves, Roland Wiest, Carlos A. Silva, Mauricio Reyes
Abstract: Stroke is the second most common cause of death in developed countries, where rapid clinical intervention can have a major impact on a patient's life. When deciding whether to perform a revascularization procedure, physicians weigh its risks and benefits based on multi-modal MRI and clinical experience. Automatic prediction of the ischemic stroke lesion outcome therefore has the potential to assist physicians towards a better stroke assessment by providing information about tissue outcome. Typically, automatic methods use the information of the standard kinetic models of diffusion and perfusion MRI (e.g., Tmax, TTP, MTT, rCBF, rCBV) to perform lesion outcome prediction. In this work, we propose a deep learning method that fuses this information with an automated data selection of the raw 4D PWI image information, followed by data-driven deep-learning modeling of the underlying blood flow hemodynamics. We demonstrate the ability of the proposed approach to improve prediction of tissue at risk before therapy, compared to using only the standard clinical perfusion maps, suggesting potential benefits of the proposed data-driven raw perfusion data modeling approach.
Submitted 12 June, 2018; originally announced June 2018.
Comments: Accepted at MICCAI 2018
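A minimal sketch of the two-branch idea: encode the raw 4D PWI time series into a few learned map channels and concatenate them with the standard perfusion maps for voxel-wise prediction. Shapes, layer sizes, and the encoder itself are illustrative assumptions, not the paper's architecture.

```python
# Sketch (illustrative dimensions, not the paper's network): fuse standard
# perfusion maps with a learned encoding of the raw PWI time series.
import torch
import torch.nn as nn

class FusionNet(nn.Module):
    def __init__(self, n_maps=5, n_timepoints=40):
        super().__init__()
        # Data-driven branch: compress the PWI time axis into a few channels.
        self.pwi_encoder = nn.Sequential(
            nn.Conv2d(n_timepoints, 16, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv2d(16, 4, kernel_size=3, padding=1), nn.ReLU(),
        )
        # Joint head over [standard maps || learned maps].
        self.head = nn.Sequential(
            nn.Conv2d(n_maps + 4, 16, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv2d(16, 1, kernel_size=1),  # lesion-outcome logit per voxel
        )

    def forward(self, maps, pwi):
        return self.head(torch.cat([maps, self.pwi_encoder(pwi)], dim=1))

maps = torch.randn(1, 5, 64, 64)   # Tmax, TTP, MTT, rCBF, rCBV (toy slice)
pwi = torch.randn(1, 40, 64, 64)   # raw DSC time series, time as channels
print(FusionNet()(maps, pwi).shape)  # torch.Size([1, 1, 64, 64])
```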
arXiv:1806.03848 [pdf, other] cs.CV
Synthetic Perfusion Maps: Imaging Perfusion Deficits in DSC-MRI with Deep Learning
Authors: Andreas Hess, Raphael Meier, Johannes Kaesmacher, Simon Jung, Fabien Scalzo, David Liebeskind, Roland Wiest, Richard McKinley
Abstract: In this work, we present a novel convolutional neural network based method for perfusion map generation in dynamic susceptibility contrast-enhanced perfusion imaging. The proposed architecture is trained end-to-end and relies solely on raw perfusion data for inference. We used a dataset of 151 acute ischemic stroke cases for evaluation. Our method generates perfusion maps that are comparable to the target maps used in clinical routine, while being model-free, fast, and less noisy.
Submitted 11 June, 2018; originally announced June 2018.
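As a rough illustration of such end-to-end map synthesis, the sketch below regresses perfusion map values directly from each voxel's raw DSC time curve, supervised by maps from a clinical pipeline. The 1x1-convolution design and all dimensions are assumptions made for illustration, not the paper's network.

```python
# Sketch: per-voxel regression from a raw DSC time curve to perfusion map
# values, trained against clinical-routine target maps (all data synthetic).
import torch
import torch.nn as nn

n_timepoints, n_maps = 40, 5  # e.g., Tmax, TTP, MTT, rCBF, rCBV
model = nn.Sequential(        # 1x1 convolutions act independently per voxel
    nn.Conv2d(n_timepoints, 64, kernel_size=1), nn.ReLU(),
    nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
    nn.Conv2d(64, n_maps, kernel_size=1),
)
dsc = torch.randn(2, n_timepoints, 64, 64)  # toy batch of DSC slices
target = torch.randn(2, n_maps, 64, 64)     # toy target maps
loss = nn.functional.mse_loss(model(dsc), target)
loss.backward()  # one end-to-end training step would follow
print(loss.item())
```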
arXiv:1806.02562 [pdf, other] cs.CV
On the Effect of Inter-observer Variability for a Reliable Estimation of Uncertainty of Medical Image Segmentation
Authors: Alain Jungo, Raphael Meier, Ekin Ermis, Marcela Blatti-Moreno, Evelyn Herrmann, Roland Wiest, Mauricio Reyes
Abstract: Uncertainty estimation methods are expected to improve the understanding and quality of computer-assisted methods used in medical applications (e.g., neurosurgical interventions, radiotherapy planning), where automated medical image segmentation is crucial. In supervised machine learning, a common practice for generating ground truth labels is to merge observer annotations. However, since many medical image tasks show high inter-observer variability, resulting from factors such as image quality and differing levels of user expertise and domain knowledge, little is known about how inter-observer variability and commonly used fusion methods affect the estimation of uncertainty in automated image segmentation. In this paper we analyze the effect of common image label fusion techniques on uncertainty estimation, and we propose to learn the uncertainty among observers. The results highlight the negative effect that fusion methods applied in deep learning have on obtaining reliable estimates of segmentation uncertainty. Additionally, we show that the learned observers' uncertainty can be combined with the current standard Monte Carlo dropout Bayesian neural networks to characterize the uncertainty of the model's parameters.
Submitted 7 June, 2018; originally announced June 2018.
Comments: Appears in Medical Image Computing and Computer Assisted Interventions (MICCAI), 2018
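The Monte Carlo dropout baseline referenced in the abstract keeps dropout active at test time and reads model uncertainty off repeated stochastic forward passes. A minimal sketch, where the tiny network is only a placeholder for any dropout-bearing segmentation model:

```python
# Sketch: MC dropout uncertainty. Run N stochastic passes with dropout on,
# average the softmax outputs, and use per-voxel entropy as an uncertainty map.
import torch
import torch.nn as nn

net = nn.Sequential(
    nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
    nn.Dropout2d(p=0.5),
    nn.Conv2d(16, 2, 1),  # background / foreground logits
)

x = torch.randn(1, 1, 64, 64)
net.train()  # keep dropout active at inference time ("MC dropout")
with torch.no_grad():
    probs = torch.stack([net(x).softmax(dim=1) for _ in range(20)])

mean = probs.mean(dim=0)                                    # averaged prediction
entropy = -(mean * mean.clamp_min(1e-8).log()).sum(dim=1)   # uncertainty map
print(mean.shape, entropy.shape)
```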
arXiv:1703.00312 [pdf, other] cs.CV
Perturb-and-MPM: Quantifying Segmentation Uncertainty in Dense Multi-Label CRFs
Authors: Raphael Meier, Urspeter Knecht, Alain Jungo, Roland Wiest, Mauricio Reyes
Abstract: This paper proposes a novel approach for uncertainty quantification in dense Conditional Random Fields (CRFs). The presented approach, called Perturb-and-MPM, enables efficient, approximate sampling from dense multi-label CRFs via random perturbations. An analytic error analysis identified the main cause of approximation error and showed that the error is bounded. Spatial uncertainty maps can be derived from the Perturb-and-MPM model and used to visualize uncertainty in image segmentation results. The method is validated on synthetic and clinical Magnetic Resonance Imaging data, and its effectiveness is demonstrated on the challenging problem of segmenting the tumor core in glioblastoma. We found that areas of high uncertainty correspond well to wrongly segmented image regions. Furthermore, we demonstrate the potential use of uncertainty maps to refine imaging biomarkers in the case of extent of resection and residual tumor volume in brain tumor patients.
Submitted 2 March, 2017; v1 submitted 1 March, 2017; originally announced March 2017.
Comments: Deactivated review mode (line spacing)
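The perturb-and-sample principle can be caricatured with unary potentials alone: add Gumbel noise to the potentials, solve each perturbed problem, and read marginals and an uncertainty map off the sample set. This toy sketch deliberately omits the dense pairwise CRF terms and the MPM inference that define the actual method; it illustrates only the sampling idea, not Perturb-and-MPM itself.

```python
# Toy sketch of perturbation-based sampling (unaries only, no CRF):
# repeatedly perturb the potentials with Gumbel noise, take per-pixel argmax,
# and turn the label samples into marginals and an entropy uncertainty map.
import numpy as np

rng = np.random.default_rng(0)
H, W, L = 32, 32, 3                   # toy image, 3 labels
unaries = rng.normal(size=(H, W, L))  # negative energies per label (synthetic)

samples = []
for _ in range(50):
    gumbel = -np.log(-np.log(rng.uniform(size=unaries.shape)))
    samples.append(np.argmax(unaries + gumbel, axis=-1))  # perturbed "MAP"
samples = np.stack(samples)

# Marginal label frequencies -> per-pixel entropy as a spatial uncertainty map.
marginals = np.stack([(samples == l).mean(axis=0) for l in range(L)], axis=-1)
entropy = -(marginals * np.log(np.clip(marginals, 1e-8, 1.0))).sum(axis=-1)
print(entropy.shape, float(entropy.mean()))
```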