
Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 148 results for author: <span class="mathjax">Menze, B</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Menze%2C+B">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Menze, B"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Menze%2C+B&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Menze, B"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Menze%2C+B&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Menze%2C+B&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Menze%2C+B&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Menze%2C+B&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.14514">arXiv:2501.14514</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.14514">pdf</a>, <a href="https://arxiv.org/format/2501.14514">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> PARASIDE: An Automatic Paranasal Sinus Segmentation and Structure Analysis Tool for MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=M%C3%B6ller%2C+H">Hendrik M枚ller</a>, <a href="/search/cs?searchtype=author&amp;query=Krautschick%2C+L">Lukas Krautschick</a>, <a href="/search/cs?searchtype=author&amp;query=Atad%2C+M">Matan Atad</a>, <a href="/search/cs?searchtype=author&amp;query=Graf%2C+R">Robert Graf</a>, <a href="/search/cs?searchtype=author&amp;query=Busch%2C+C">Chia-Jung Busch</a>, <a href="/search/cs?searchtype=author&amp;query=Beule%2C+A">Achim Beule</a>, <a href="/search/cs?searchtype=author&amp;query=Scharf%2C+C">Christian Scharf</a>, <a href="/search/cs?searchtype=author&amp;query=Kaderali%2C+L">Lars Kaderali</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J">Jan Kirschke</a>, <a href="/search/cs?searchtype=author&amp;query=Schwitzing%2C+F">Fabian Schwitzing</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.14514v1-abstract-short" style="display: inline;"> Chronic rhinosinusitis (CRS) is a common and persistent sinus imflammation that affects 5 - 12\% of the general population. 
   It significantly impacts quality of life and is often difficult to assess due to its subjective nature in clinical evaluation. We introduce PARASIDE, an automatic tool for segmenting air and soft tissue volumes of the structures of the sinus maxillaris, frontalis, sphenoidalis, and ethmoidalis in T1 MRI. By utilizing that segmentation, we can quantify feature relations that had previously been observed only manually and subjectively. We performed an exemplary study and showed both volume and intensity relations between structures and radiology reports. While the soft tissue segmentation is good, the automated annotations of the air volumes are excellent. The average intensity over air structures is consistently below that of the soft tissues, with close to perfect separability. Healthy subjects exhibit lower soft tissue volumes and lower intensities. Our system is the first automated whole-nasal segmentation of 16 structures, and it can calculate medically relevant features such as the Lund-Mackay score.
   Submitted 24 January, 2025; originally announced January 2025.
2. arXiv:2501.08226 [pdf, other] (cs.CV, cs.LG)
   Efficient Deep Learning-based Forward Solvers for Brain Tumor Growth Models
   Authors: Zeineb Haouari, Jonas Weidner, Ivan Ezhov, Aswathi Varma, Daniel Rueckert, Bjoern Menze, Benedikt Wiestler
   Abstract: Glioblastoma, a highly aggressive brain tumor, poses major challenges due to its poor prognosis and high morbidity rates. Partial differential equation-based models offer promising potential to enhance therapeutic outcomes by simulating patient-specific tumor behavior for improved radiotherapy planning. However, model calibration remains a bottleneck due to the high computational demands of optimization methods like Monte Carlo sampling and evolutionary algorithms. To address this, we recently introduced an approach leveraging a neural forward solver with gradient-based optimization to significantly reduce calibration time. This approach requires a highly accurate and fully differentiable forward model. We investigate multiple architectures, including (i) an enhanced TumorSurrogate, (ii) a modified nnU-Net, and (iii) a 3D Vision Transformer (ViT). The optimized TumorSurrogate achieved the best overall results, excelling in both tumor outline matching and voxel-level prediction of tumor cell concentration. It halved the MSE relative to the baseline model and achieved the highest Dice score across all tumor cell concentration thresholds.
   Our study demonstrates significant enhancement in forward solver performance and outlines important future research directions.
   Submitted 14 January, 2025; originally announced January 2025.

3. arXiv:2412.13811 [pdf, other] (physics.med-ph, cs.CV)
   Spatial Brain Tumor Concentration Estimation for Individualized Radiotherapy Planning
   Authors: Jonas Weidner, Michal Balcerak, Ivan Ezhov, André Datchev, Laurin Lux, Lucas Zimmer, Daniel Rueckert, Björn Menze, Benedikt Wiestler
   Abstract: Biophysical modeling of brain tumors has emerged as a promising strategy for personalizing radiotherapy planning by estimating the otherwise hidden distribution of tumor cells within the brain. However, many existing state-of-the-art methods are computationally intensive, limiting their widespread translation into clinical practice. In this work, we propose an efficient and direct method that utilizes soft physical constraints to estimate the tumor cell concentration from preoperative MRI of brain tumor patients.
   Our approach optimizes a 3D tumor concentration field by simultaneously minimizing the difference between the observed MRI and a physically informed loss function. Compared to existing state-of-the-art techniques, our method significantly improves the prediction of tumor recurrence on two public datasets with a total of 192 patients, while maintaining a clinically viable runtime of under one minute, a substantial reduction from the 30 minutes required by the current best approach. Furthermore, we showcase the generalizability of our framework by incorporating additional imaging information and physical constraints, highlighting its potential to translate to various medical diffusion phenomena with imperfect data.
   Submitted 18 December, 2024; originally announced December 2024.

4. arXiv:2411.17386 [pdf, other] (eess.IV, cs.CV)
   vesselFM: A Foundation Model for Universal 3D Blood Vessel Segmentation
   Authors: Bastian Wittmann, Yannick Wattenberg, Tamaz Amiranashvili, Suprosanna Shit, Bjoern Menze
   Abstract: Segmenting 3D blood vessels is a critical yet challenging task in medical image analysis.
   This is due to significant imaging modality-specific variations in artifacts, vascular patterns and scales, signal-to-noise ratios, and background tissues. These variations, along with domain gaps arising from varying imaging protocols, limit the generalization of existing supervised learning-based methods and require tedious voxel-level annotations for each dataset separately. While foundation models promise to alleviate this limitation, they typically fail to generalize to the task of blood vessel segmentation, which poses a unique, complex problem. In this work, we present vesselFM, a foundation model designed specifically for the broad task of 3D blood vessel segmentation. Unlike previous models, vesselFM can effortlessly generalize to unseen domains. To achieve zero-shot generalization, we train vesselFM on three heterogeneous data sources: a large, curated annotated dataset, data generated by a domain randomization scheme, and data sampled from a flow matching-based generative model. Extensive evaluations show that vesselFM outperforms state-of-the-art medical image segmentation foundation models across four (pre-)clinically relevant imaging modalities in zero-, one-, and few-shot scenarios, therefore providing a universal solution for 3D blood vessel segmentation.
   Submitted 26 November, 2024; originally announced November 2024.

5. arXiv:2411.09822 [pdf, other] (cs.CV, cs.AI)
   A Self-Supervised Model for Multi-modal Stroke Risk Prediction
   Authors: Camille Delgrange, Olga Demler, Samia Mora, Bjoern Menze, Ezequiel de la Rosa, Neda Davoudi
   Abstract: Predicting stroke risk is a complex challenge that can be enhanced by integrating diverse clinically available data modalities. This study introduces a self-supervised multimodal framework that combines 3D brain imaging, clinical data, and image-derived features to improve stroke risk prediction prior to onset.
   By leveraging large unannotated clinical datasets, the framework captures complementary and synergistic information across image and tabular data modalities. Our approach is based on a contrastive learning framework that couples contrastive language-image pretraining with an image-tabular matching module to better align multimodal data representations in a shared latent space. The model is trained on the UK Biobank, which includes structural brain MRI and clinical data. We benchmark its performance against state-of-the-art unimodal and multimodal methods using tabular, image, and image-tabular combinations under diverse frozen and trainable model settings. The proposed model outperformed self-supervised tabular (image) methods by 2.6% (2.6%) in ROC-AUC and by 3.3% (5.6%) in balanced accuracy. Additionally, it showed a 7.6% increase in balanced accuracy compared to the best multimodal supervised model. Through interpretable tools, our approach demonstrated better integration of tabular and image data, providing richer and more aligned embeddings. Gradient-weighted Class Activation Mapping heatmaps further revealed activated brain regions commonly associated in the literature with brain aging, stroke risk, and clinical outcomes. This robust self-supervised multimodal framework surpasses state-of-the-art methods for stroke risk prediction and offers a strong foundation for future studies integrating diverse data modalities to advance clinical predictive modelling.
   Submitted 14 November, 2024; originally announced November 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted as oral paper at AIM-FM workshop, Neurips 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05900">arXiv:2411.05900</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05900">pdf</a>, <a href="https://arxiv.org/format/2411.05900">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Enhancing Cardiovascular Disease Prediction through Multi-Modal Self-Supervised Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Girlanda%2C+F">Francesco Girlanda</a>, <a href="/search/cs?searchtype=author&amp;query=Demler%2C+O">Olga Demler</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Davoudi%2C+N">Neda Davoudi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05900v1-abstract-short" style="display: inline;"> Accurate prediction of cardiovascular diseases remains imperative for early diagnosis and intervention, necessitating robust and precise predictive models. Recently, there has been a growing interest in multi-modal learning for uncovering novel insights not available through uni-modal datasets alone. By combining cardiac magnetic resonance images, electrocardiogram signals, and available medical i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05900v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05900v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05900v1-abstract-full" style="display: none;"> Accurate prediction of cardiovascular diseases remains imperative for early diagnosis and intervention, necessitating robust and precise predictive models. Recently, there has been a growing interest in multi-modal learning for uncovering novel insights not available through uni-modal datasets alone. By combining cardiac magnetic resonance images, electrocardiogram signals, and available medical information, our approach enables the capture of holistic status about individuals&#39; cardiovascular health by leveraging shared information across modalities. Integrating information from multiple modalities and benefiting from self-supervised learning techniques, our model provides a comprehensive framework for enhancing cardiovascular disease prediction with limited annotated datasets. We employ a masked autoencoder to pre-train the electrocardiogram ECG encoder, enabling it to extract relevant features from raw electrocardiogram data, and an image encoder to extract relevant features from cardiac magnetic resonance images. 
   Subsequently, we utilize a multi-modal contrastive learning objective to transfer knowledge from an expensive and complex modality, the cardiac magnetic resonance image, to cheap and simple modalities such as electrocardiograms and medical information. Finally, we fine-tune the pre-trained encoders on specific predictive tasks, such as myocardial infarction. Our proposed method enhanced the image information by leveraging the different available modalities and outperformed the supervised approach by 7.6% in balanced accuracy.
   Submitted 8 November, 2024; originally announced November 2024.
   Comments: Accepted to the British Machine Vision Conference (BMVC) 2024.

7. arXiv:2411.05597 [pdf, ps, other] (cs.CV, cs.LG)
   Predicting Stroke through Retinal Graphs and Multimodal Self-supervised Learning
   Authors: Yuqing Huang, Bastian Wittmann, Olga Demler, Bjoern Menze, Neda Davoudi
   Abstract: Early identification of stroke is crucial for intervention, requiring reliable models.
   We proposed an efficient retinal image representation together with clinical information to capture a comprehensive overview of cardiovascular health, leveraging large multimodal datasets for new medical insights. Our approach is one of the first contrastive frameworks that integrates graph and tabular data, using vessel graphs derived from retinal images for efficient representation. This method, combined with multimodal contrastive learning, significantly enhances stroke prediction accuracy by integrating data from multiple sources and using contrastive learning for transfer learning. The self-supervised learning techniques employed allow the model to learn effectively from unlabeled data, reducing the dependency on large annotated datasets. Our framework showed an AUROC improvement of 3.78% from supervised to self-supervised approaches. Additionally, the graph-level representation approach achieved superior performance to image encoders while significantly reducing pre-training and fine-tuning runtimes. These findings indicate that retinal images are a cost-effective method for improving cardiovascular disease predictions and pave the way for future research into retinal and cerebral vessel connections and the use of graph-based retinal vessel representations.
   Submitted 8 November, 2024; originally announced November 2024.
   Comments: Accepted as an oral paper at the ML-CDS workshop, MICCAI 2024.

8. arXiv:2410.23318 [pdf, other] (eess.IV, cs.CV, cs.LG)
   Denoising Diffusion Probabilistic Models for Magnetic Resonance Fingerprinting
   Authors: Perla Mayo, Carolin M. Pirkl, Alin Achim, Bjoern H. Menze, Mohammad Golbabaee
   Abstract: Magnetic Resonance Fingerprinting (MRF) is a time-efficient approach to quantitative MRI, enabling the mapping of multiple tissue properties from a single, accelerated scan. However, achieving accurate reconstructions remains challenging, particularly in highly accelerated and undersampled acquisitions, which are crucial for reducing scan times. While deep learning techniques have advanced image reconstruction, the recent introduction of diffusion models offers new possibilities for imaging tasks, though their application in the medical field is still emerging. Notably, diffusion models have not yet been explored for the MRF problem. In this work, we propose for the first time a conditional diffusion probabilistic model for MRF image reconstruction. Qualitative and quantitative comparisons on in-vivo brain scan data demonstrate that the proposed approach can outperform established deep learning and compressed sensing algorithms for MRF reconstruction. Extensive ablation studies also explore strategies to improve the computational efficiency of our approach.
   Submitted 18 December, 2024; v1 submitted 29 October, 2024; originally announced October 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 5 figures, 3 tables, 2 algorithms</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.20409">arXiv:2409.20409</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.20409">pdf</a>, <a href="https://arxiv.org/format/2409.20409">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Physics-Regularized Multi-Modal Image Assimilation for Brain Tumor Localization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Balcerak%2C+M">Michal Balcerak</a>, <a href="/search/cs?searchtype=author&amp;query=Amiranashvili%2C+T">Tamaz Amiranashvili</a>, <a href="/search/cs?searchtype=author&amp;query=Wagner%2C+A">Andreas Wagner</a>, <a href="/search/cs?searchtype=author&amp;query=Weidner%2C+J">Jonas Weidner</a>, <a href="/search/cs?searchtype=author&amp;query=Karnakov%2C+P">Petr Karnakov</a>, <a href="/search/cs?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a>, <a href="/search/cs?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/cs?searchtype=author&amp;query=Koumoutsakos%2C+P">Petros Koumoutsakos</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.20409v3-abstract-short" style="display: inline;"> Physical models in the form of partial differential equations serve as important priors for many under-constrained problems. One such application is tumor treatment planning, which relies on accurately estimating the spatial distribution of tumor cells within a patient&#39;s anatomy. While medical imaging can detect the bulk of a tumor, it cannot capture the full extent of its spread, as low-concentra&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20409v3-abstract-full').style.display = 'inline'; document.getElementById('2409.20409v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.20409v3-abstract-full" style="display: none;"> Physical models in the form of partial differential equations serve as important priors for many under-constrained problems. One such application is tumor treatment planning, which relies on accurately estimating the spatial distribution of tumor cells within a patient&#39;s anatomy. While medical imaging can detect the bulk of a tumor, it cannot capture the full extent of its spread, as low-concentration tumor cells often remain undetectable, particularly in glioblastoma, the most common primary brain tumor. Machine learning approaches struggle to estimate the complete tumor cell distribution due to a lack of appropriate training data. 
   Consequently, most existing methods rely on physics-based simulations to generate anatomically and physiologically plausible estimations. However, these approaches face challenges with complex and unknown initial conditions and are constrained by overly rigid physical models. In this work, we introduce a novel method that integrates data-driven and physics-based cost functions, akin to Physics-Informed Neural Networks (PINNs). However, our approach parametrizes the solution directly on a dynamic discrete mesh, allowing for the effective modeling of complex biomechanical behaviors. Specifically, we propose a unique discretization scheme that quantifies how well the learned spatiotemporal distributions of tumor and brain tissues adhere to their respective growth and elasticity equations. This quantification acts as a regularization term, offering greater flexibility and improved integration of patient data compared to existing models. We demonstrate enhanced coverage of tumor recurrence areas using real-world data from a patient cohort, highlighting the potential of our method to improve model-driven treatment planning for glioblastoma in clinical practice.
   Submitted 30 October, 2024; v1 submitted 30 September, 2024; originally announced September 2024.
   Comments: Accepted to NeurIPS 2024.

10. arXiv:2409.06609 [pdf, ps, other] (cs.CV, cs.LG)
   Improving the Precision of CNNs for Magnetic Resonance Spectral Modeling
   Authors: John LaMaster, Dhritiman Das, Florian Kofler, Jason Crane, Yan Li, Tobias Lasser, Bjoern H. Menze
   Abstract: Magnetic resonance spectroscopic imaging is a widely available imaging modality that can non-invasively provide a metabolic profile of the tissue of interest, yet it is challenging to integrate clinically. One major reason is the expensive, expert data processing and analysis that is required. Using machine learning to predict MRS-related quantities offers avenues around this problem, but deep learning models bring their own challenges, especially model trust. Current research trends focus primarily on mean error metrics, but comprehensive precision metrics are also needed, e.g. standard deviations, confidence intervals, etc. This work highlights why more comprehensive error characterization is important and how to improve the precision of CNNs for spectral modeling, a quantitative task. The results highlight the advantages and trade-offs of these techniques that should be considered when addressing such regression tasks with CNNs. Detailed insights into the underlying mechanisms of each technique, and how they interact with other techniques, are discussed in depth.
   Submitted 10 September, 2024; originally announced September 2024.
   Comments: 11 pages, 1 figure, 2 tables.
   ACM Class: I.2.m; I.4.m

11. arXiv:2408.11142 [pdf] (cs.CV)
   ISLES 2024: The first longitudinal multimodal multi-center real-world dataset in (sub-)acute stroke
   Authors: Evamaria O. Riedel, Ezequiel de la Rosa, The Anh Baran, Moritz Hernandez Petzsche, Hakim Baazaoui, Kaiyuan Yang, David Robben, Joaquin Oscar Seia, Roland Wiest, Mauricio Reyes, Ruisheng Su, Claus Zimmer, Tobias Boeckh-Behrens, Maria Berndt, Bjoern Menze, Benedikt Wiestler, Susanne Wegener, Jan S. Kirschke
   Abstract: Stroke remains a leading cause of global morbidity and mortality, placing a heavy socioeconomic burden. Over the past decade, advances in endovascular reperfusion therapy and the use of CT and MRI imaging for treatment guidance have significantly improved patient outcomes and are now standard in clinical practice. To develop machine learning algorithms that can extract meaningful and reproducible models of brain function from stroke images for both clinical and research purposes - particularly for lesion identification, brain health quantification, and prognosis - large, diverse, and well-annotated public datasets are essential. While only a few datasets with (sub-)acute stroke data were previously available, several large, high-quality datasets have recently been made publicly accessible. However, these existing datasets include only MRI data. In contrast, our dataset is the first to offer comprehensive longitudinal stroke data, including acute CT imaging with angiography and perfusion, follow-up MRI at 2-9 days, as well as acute and longitudinal clinical data up to a three-month outcome.
The dataset includes a training dataset of n = 150 and a test dataset of n = 100 scans. Training data is publicly available, while test data will be used exclusively for model validation. We are making this dataset available as part of the 2024 edition of the Ischemic Stroke Lesion Segmentation (ISLES) challenge (https://www.isles-challenge.org/), which continuously aims to establish benchmark methods for acute and sub-acute ischemic stroke lesion segmentation, aiding in creating open stroke imaging datasets and evaluating cutting-edge image processing algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11142v1-abstract-full').style.display = 'none'; document.getElementById('2408.11142v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.10966">arXiv:2408.10966</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.10966">pdf</a>, <a href="https://arxiv.org/format/2408.10966">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> ISLES&#39;24: Improving final infarct prediction in ischemic stroke using multimodal imaging and clinical data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=de+la+Rosa%2C+E">Ezequiel de la Rosa</a>, <a href="/search/cs?searchtype=author&amp;query=Su%2C+R">Ruisheng Su</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Riedel%2C+E+O">Evamaria O. Riedel</a>, <a href="/search/cs?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+K">Kaiyuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Baazaoui%2C+H">Hakim Baazaoui</a>, <a href="/search/cs?searchtype=author&amp;query=Robben%2C+D">David Robben</a>, <a href="/search/cs?searchtype=author&amp;query=Wegener%2C+S">Susanne Wegener</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J+S">Jan S. Kirschke</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.10966v1-abstract-short" style="display: inline;"> Accurate estimation of core (irreversibly damaged tissue) and penumbra (salvageable tissue) volumes is essential for ischemic stroke treatment decisions. Perfusion CT, the clinical standard, estimates these volumes but is affected by variations in deconvolution algorithms, implementations, and thresholds. 
Core tissue expands over time, with growth rates influenced by thrombus location, collateral&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10966v1-abstract-full').style.display = 'inline'; document.getElementById('2408.10966v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.10966v1-abstract-full" style="display: none;"> Accurate estimation of core (irreversibly damaged tissue) and penumbra (salvageable tissue) volumes is essential for ischemic stroke treatment decisions. Perfusion CT, the clinical standard, estimates these volumes but is affected by variations in deconvolution algorithms, implementations, and thresholds. Core tissue expands over time, with growth rates influenced by thrombus location, collateral circulation, and inherent patient-specific factors. Understanding this tissue growth is crucial for determining the need to transfer patients to comprehensive stroke centers, predicting the benefits of additional reperfusion attempts during mechanical thrombectomy, and forecasting final clinical outcomes. This work presents the ISLES&#39;24 challenge, which addresses final post-treatment stroke infarct prediction from pre-interventional acute stroke imaging and clinical data. ISLES&#39;24 establishes a unique 360-degree setting where all feasibly accessible clinical data are available for participants, including full CT acute stroke imaging, sub-acute follow-up MRI, and clinical tabular data. The contributions of this work are two-fold: first, we introduce a standardized benchmarking of final stroke infarct segmentation algorithms through the ISLES&#39;24 challenge; second, we provide insights into infarct segmentation using multimodal imaging and clinical data strategies by identifying outperforming methods on a finely curated dataset. The outputs of this challenge are anticipated to enhance clinical decision-making and improve patient outcome predictions. All ISLES&#39;24 materials, including data, performance evaluation scripts, and leading algorithmic strategies, are available to the research community following \url{https://isles-24.grand-challenge.org/}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10966v1-abstract-full').style.display = 'none'; document.getElementById('2408.10966v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
arXiv:2408.02367 (eess.IV, cs.CV, cs.LG) https://arxiv.org/abs/2408.02367
DOI: 10.1007/978-3-031-73290-4_13
Title: StoDIP: Efficient 3D MRF image reconstruction with deep image priors and stochastic iterations
Authors: Perla Mayo, Matteo Cencini, Carolin M. Pirkl, Marion I. Menzel, Michela Tosetti, Bjoern H. Menze, Mohammad Golbabaee
Abstract: Magnetic Resonance Fingerprinting (MRF) is a time-efficient approach to quantitative MRI for multiparametric tissue mapping. The reconstruction of quantitative maps requires tailored algorithms for removing aliasing artefacts from the compressed sampled MRF acquisitions. Among the approaches found in the literature, many focus solely on two-dimensional (2D) image reconstruction, neglecting the extension to volumetric (3D) scans despite their higher relevance and clinical value. One reason is that transitioning to 3D imaging without appropriate mitigations presents significant challenges, including increased computational cost and storage requirements, and the need for a large amount of ground-truth (artefact-free) data for training. To address these issues, we introduce StoDIP, a new algorithm that extends the ground-truth-free Deep Image Prior (DIP) reconstruction to 3D MRF imaging. StoDIP employs memory-efficient stochastic updates across the multicoil MRF data, a carefully selected neural network architecture, and faster nonuniform FFT (NUFFT) transformations. This enables faster convergence compared with a conventional DIP implementation without these features. Tested on a dataset of whole-brain scans from healthy volunteers, StoDIP demonstrated superior performance over ground-truth-free reconstruction baselines, both quantitatively and qualitatively.
Submitted 5 August, 2024; originally announced August 2024.
Comments: 10 pages, 2 figures, 1 table, 1 algorithm
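The general pattern the abstract describes, a Deep Image Prior fit where each iteration uses the measurements of a single randomly chosen coil, can be sketched as below. This is a minimal illustration under our own assumptions, not the authors' code: a masked FFT stands in for the NUFFT, and the network and volume are far smaller than anything used in practice.

    import torch
    import torch.nn as nn

    torch.manual_seed(0)
    N, C = 16, 4                                        # volume size N^3, C coils
    x_true = torch.randn(N, N, N)                       # stand-in ground-truth volume
    sens = torch.randn(C, N, N, N, dtype=torch.cfloat)  # stand-in coil sensitivities
    mask = (torch.rand(N, N, N) < 0.3).to(torch.cfloat) # undersampling pattern

    def forward_op(x, c):
        # Masked FFT of the coil-weighted image: a crude stand-in for the NUFFT.
        return mask * torch.fft.fftn(x * sens[c])

    y = torch.stack([forward_op(x_true, c) for c in range(C)])  # measurements

    net = nn.Sequential(nn.Conv3d(1, 8, 3, padding=1), nn.ReLU(),
                        nn.Conv3d(8, 1, 3, padding=1))
    z = torch.randn(1, 1, N, N, N)                      # fixed DIP input noise
    opt = torch.optim.Adam(net.parameters(), lr=1e-3)

    for it in range(200):
        c = torch.randint(C, (1,)).item()               # stochastic coil selection
        x = net(z).squeeze()
        loss = (forward_op(x, c) - y[c]).abs().pow(2).mean()
        opt.zero_grad(); loss.backward(); opt.step()

Updating against one coil at a time keeps the per-iteration memory footprint bounded by a single coil's data, which is the point of the stochastic variant.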
arXiv:2407.19866 (eess.IV, cs.LG) https://arxiv.org/abs/2407.19866
Title: Deep Image Priors for Magnetic Resonance Fingerprinting with pretrained Bloch-consistent denoising autoencoders
Authors: Perla Mayo, Matteo Cencini, Ketan Fatania, Carolin M. Pirkl, Marion I. Menzel, Bjoern H. Menze, Michela Tosetti, Mohammad Golbabaee
Abstract: The estimation of multi-parametric quantitative maps from Magnetic Resonance Fingerprinting (MRF) compressed sampled acquisitions, albeit successful, remains a challenge due to the high undersampling rate and the artifacts naturally occurring during image reconstruction. Whilst state-of-the-art deep learning methods can successfully address the task, to fully exploit their capabilities they often require training on a paired dataset, in an area where ground truth is seldom available. In this work, we propose a method that combines a deep image prior (DIP) module, which requires no ground truth, with a Bloch-consistency-enforcing autoencoder. The resulting method is faster than DIP-MRF and of equivalent or better accuracy.
Submitted 29 July, 2024; originally announced July 2024.
Comments: 4 pages, 3 figures, 1 table; presented at ISBI 2024
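Where it enters the optimization can be made concrete with a few lines. This is a schematic under our own assumptions, not the paper's implementation: the autoencoder here is an untrained stand-in (in the paper it is pretrained on Bloch-simulated signals), and the reconstruction variable replaces a full DIP network for brevity.

    import torch
    import torch.nn as nn

    torch.manual_seed(0)
    V, T = 256, 50                          # voxels, fingerprint length
    y = torch.randn(V, T)                   # stand-in measured fingerprints

    # Frozen denoising autoencoder; pretrained and Bloch-consistent in the paper.
    dae = nn.Sequential(nn.Linear(T, 16), nn.ReLU(), nn.Linear(16, T))
    for p in dae.parameters():
        p.requires_grad_(False)

    x = torch.zeros(V, T, requires_grad=True)  # reconstruction (DIP output, simplified)
    opt = torch.optim.Adam([x], lr=1e-2)
    lam = 0.1
    for it in range(100):
        data_term = (x - y).pow(2).mean()          # stand-in data fidelity
        bloch_term = (x - dae(x)).pow(2).mean()    # pull x toward the DAE's manifold
        loss = data_term + lam * bloch_term
        opt.zero_grad(); loss.backward(); opt.step()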
arXiv:2407.08855 (eess.IV, cs.CV) https://arxiv.org/abs/2407.08855
Title: BraTS-PEDs: Results of the Multi-Consortium International Pediatric Brain Tumor Segmentation Challenge 2023
Authors: Anahita Fathi Kazerooni, Nastaran Khalili, Xinyang Liu, Debanjan Haldar, Zhifan Jiang, Anna Zapaishchykova, Julija Pavaine, Lubdha M. Shah, Blaise V. Jones, Nakul Sheth, Sanjay P. Prabhu, Aaron S. McAllister, Wenxin Tu, Khanak K. Nandolia, Andres F. Rodriguez, Ibraheem Salman Shaikh, Mariana Sanchez Montano, Hollie Anne Lai, Maruf Adewole, Jake Albrecht, Udunna Anazodo, Hannah Anderson, Syed Muhammed Anwar, Alejandro Aristizabal, Sina Bagheri, et al. (55 additional authors not shown)
Abstract: Pediatric central nervous system tumors are the leading cause of cancer-related deaths in children. The five-year survival rate for high-grade glioma in children is less than 20%. The development of new treatments depends on multi-institutional collaborative clinical trials requiring reproducible and accurate centralized response assessment. We present the results of the BraTS-PEDs 2023 challenge, the first Brain Tumor Segmentation (BraTS) challenge focused on pediatric brain tumors. This challenge used data acquired from multiple international consortia dedicated to pediatric neuro-oncology and clinical trials. BraTS-PEDs 2023 aimed to evaluate volumetric segmentation algorithms for pediatric brain gliomas from magnetic resonance imaging using the standardized quantitative performance evaluation metrics employed across the BraTS 2023 challenges. The top-performing AI approaches for pediatric tumor analysis included ensembles of nnU-Net and Swin UNETR, Auto3DSeg, or nnU-Net with a self-supervised framework. The BraTS-PEDs 2023 challenge fostered collaboration between clinicians (neuro-oncologists, neuroradiologists) and AI/imaging scientists, promoting faster data sharing and the development of automated volumetric analysis techniques. These advancements could significantly benefit clinical trials and improve the care of children with brain tumors.
Submitted 16 July, 2024; v1 submitted 11 July, 2024; originally announced July 2024.
arXiv:2407.07488 (cs.CV, cs.AI) https://arxiv.org/abs/2407.07488
Title: FUNAvg: Federated Uncertainty Weighted Averaging for Datasets with Diverse Labels
Authors: Malte Tölle, Fernando Navarro, Sebastian Eble, Ivo Wolf, Bjoern Menze, Sandy Engelhardt
Abstract: Federated learning is a popular paradigm for training a joint model in a distributed, privacy-preserving environment. Partial annotations pose an obstacle, however, because the categories of labels are heterogeneous across clients. We propose to learn a joint backbone in a federated manner, while each site receives its own multi-label segmentation head. Using Bayesian techniques, we observe that the different segmentation heads, although trained only on the individual client's labels, also learn information about the other labels not present at the respective site. This information is encoded in their predictive uncertainty. To obtain a final prediction, we leverage this uncertainty and perform a weighted averaging of the ensemble of distributed segmentation heads, which allows us to segment "locally unknown" structures. With our method, which we refer to as FUNAvg, we are on average even on par with models trained and tested on the same dataset. The code is publicly available at https://github.com/Cardio-AI/FUNAvg.
Submitted 10 July, 2024; originally announced July 2024.
Comments: Accepted at MICCAI 2024
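The fusion step can be pictured as follows. This is our own minimal instantiation: the paper's Bayesian uncertainty estimate may differ from the plain softmax entropy used here, and `logits` is an illustrative stand-in for the per-site head outputs.

    import torch

    torch.manual_seed(0)
    S, C, H, W = 3, 4, 32, 32              # sites/heads, classes, image size
    logits = torch.randn(S, C, H, W)       # outputs of the distributed heads

    probs = logits.softmax(dim=1)                               # (S, C, H, W)
    entropy = -(probs * probs.clamp_min(1e-8).log()).sum(dim=1) # (S, H, W)
    w = 1.0 / (entropy + 1e-6)             # confident heads get larger weight
    w = w / w.sum(dim=0, keepdim=True)     # normalize weights over sites
    fused = (w.unsqueeze(1) * probs).sum(dim=0)                 # (C, H, W)
    prediction = fused.argmax(dim=0)       # final label map

Because the weights are computed per voxel, a head that is uncertain only in some regions still contributes fully where it is confident.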
arXiv:2407.05842 (cs.CV) https://arxiv.org/abs/2407.05842
Title: 3D Vessel Graph Generation Using Denoising Diffusion
Authors: Chinmay Prabhakar, Suprosanna Shit, Fabio Musio, Kaiyuan Yang, Tamaz Amiranashvili, Johannes C. Paetzold, Hongwei Bran Li, Bjoern Menze
Abstract: Blood vessel networks, represented as 3D graphs, help predict disease biomarkers, simulate blood flow, and aid in synthetic image generation, relevant in both clinical and pre-clinical settings. However, generating realistic vessel graphs that correspond to an anatomy of interest is challenging. Previous methods have mostly generated vessel trees in an autoregressive style and could not be applied to vessel graphs with cycles, such as capillaries, or to specific anatomical structures, such as the Circle of Willis. Addressing this gap, we introduce the first application of denoising diffusion models to 3D vessel graph generation. Our contributions include a novel two-stage generation method that sequentially denoises node coordinates and edges. We experiment with two real-world vessel datasets, consisting of microscopic capillaries and major cerebral vessels, and demonstrate the generalizability of our method for producing diverse, novel, and anatomically plausible vessel graphs.
Submitted 8 July, 2024; originally announced July 2024.
Comments: Accepted to MICCAI 2024
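The two-stage order (node coordinates first, then edges conditioned on them) can be sketched as below. The networks are untrained stand-ins and the edge stage is reduced to a one-shot prediction, so this shows the control flow of the idea, not a working generator; the paper denoises edges as well.

    import torch
    import torch.nn as nn

    torch.manual_seed(0)
    N, T = 20, 50                                 # nodes, diffusion steps
    betas = torch.linspace(1e-4, 0.02, T)
    alphas = 1.0 - betas
    abar = torch.cumprod(alphas, dim=0)

    eps_net = nn.Sequential(nn.Linear(3 + 1, 64), nn.ReLU(), nn.Linear(64, 3))
    edge_net = nn.Sequential(nn.Linear(6, 64), nn.ReLU(), nn.Linear(64, 1))

    with torch.no_grad():
        # Stage 1: DDPM-style reverse diffusion over node coordinates.
        x = torch.randn(N, 3)
        for t in reversed(range(T)):
            tt = torch.full((N, 1), t / T)        # crude timestep conditioning
            eps = eps_net(torch.cat([x, tt], dim=1))
            x = (x - betas[t] / (1 - abar[t]).sqrt() * eps) / alphas[t].sqrt()
            if t > 0:
                x = x + betas[t].sqrt() * torch.randn_like(x)

        # Stage 2: predict edges from pairs of denoised coordinates.
        pairs = torch.cat([x.unsqueeze(1).expand(N, N, 3),
                           x.unsqueeze(0).expand(N, N, 3)], dim=-1)
        adj = edge_net(pairs).squeeze(-1).sigmoid() > 0.5
        adj = adj | adj.t()                       # symmetrize: undirected graph

Generating coordinates before edges means the edge model can condition on geometry, which is what lets cyclic structures emerge rather than being ruled out by a tree-growing order.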
arXiv:2405.18435 (eess.IV, cs.CV) https://arxiv.org/abs/2405.18435
Title: QUBIQ: Uncertainty Quantification for Biomedical Image Segmentation Challenge
Authors: Hongwei Bran Li, Fernando Navarro, Ivan Ezhov, Amirhossein Bayat, Dhritiman Das, Florian Kofler, Suprosanna Shit, Diana Waldmannstetter, Johannes C. Paetzold, Xiaobin Hu, Benedikt Wiestler, Lucas Zimmer, Tamaz Amiranashvili, Chinmay Prabhakar, Christoph Berger, Jonas Weidner, Michelle Alonso-Basant, Arif Rashid, Ujjwal Baid, Wesam Adel, Deniz Ali, Bhakti Baheti, Yingbin Bai, Ishaan Bhatt, Sabri Can Cetindag, et al. (55 additional authors not shown)
Abstract: Uncertainty in medical image segmentation tasks, especially inter-rater variability arising from differences in interpretations and annotations by various experts, presents a significant challenge in achieving consistent and reliable image segmentation. This variability not only reflects the inherent complexity and subjective nature of medical image interpretation but also directly impacts the development and evaluation of automated segmentation algorithms. Accurately modeling and quantifying this variability is essential for enhancing the robustness and clinical applicability of these algorithms. We report the set-up and summarize the benchmark results of the Quantification of Uncertainties in Biomedical Image Quantification Challenge (QUBIQ), which was organized in conjunction with the International Conferences on Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2020 and 2021. The challenge focuses on the uncertainty quantification of medical image segmentation, considering the omnipresence of inter-rater variability in imaging datasets. The large collection of images with multi-rater annotations features various modalities such as MRI and CT; various organs such as the brain, prostate, kidney, and pancreas; and different image dimensions (2D vs. 3D). A total of 24 teams submitted different solutions to the problem, combining various baseline models, Bayesian neural networks, and ensemble techniques. The results indicate the importance of ensemble models, as well as the need for further research into efficient methods for uncertainty quantification in 3D segmentation tasks.
Submitted 24 June, 2024; v1 submitted 19 March, 2024; originally announced May 2024.
Comments: initial technical report
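The core objects in such a benchmark can be made concrete in a few lines. This is our illustration, not necessarily the challenge's exact scoring code: several binary rater masks are averaged into a soft consensus, and a probabilistic prediction is compared against it after binarizing both at matched thresholds.

    import numpy as np

    rng = np.random.default_rng(0)
    base = rng.random((64, 64)) < 0.2        # latent "true" structure
    # Four raters, each flipping a few voxels of the base mask.
    raters = [base ^ (rng.random((64, 64)) < 0.05) for _ in range(4)]
    consensus = np.mean(raters, axis=0)      # soft ground truth in [0, 1]
    # Stand-in probabilistic prediction: consensus plus noise.
    pred = np.clip(consensus + rng.normal(0, 0.1, (64, 64)), 0, 1)

    def dice(a, b):
        inter = np.logical_and(a, b).sum()
        return 2.0 * inter / (a.sum() + b.sum() + 1e-8)

    # Average Dice over matched binarization thresholds.
    scores = [dice(pred >= t, consensus >= t) for t in np.arange(0.1, 1.0, 0.2)]
    print(np.mean(scores))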
arXiv:2405.18383 (cs.CV, cs.AI, cs.HC, cs.LG) https://arxiv.org/abs/2405.18383
Title: Brain Tumor Segmentation (BraTS) Challenge 2024: Meningioma Radiotherapy Planning Automated Segmentation
Authors: Dominic LaBella, Katherine Schumacher, Michael Mix, Kevin Leu, Shan McBurney-Lin, Pierre Nedelec, Javier Villanueva-Meyer, Jonathan Shapey, Tom Vercauteren, Kazumi Chia, Omar Al-Salihi, Justin Leu, Lia Halasz, Yury Velichko, Chunhao Wang, John Kirkpatrick, Scott Floyd, Zachary J. Reitman, Trey Mullikin, Ulas Bagci, Sean Sachdev, Jona A. Hattangadi-Gluth, Tyler Seibert, Nikdokht Farid, Connor Puett, et al. (45 additional authors not shown)
Abstract: The 2024 Brain Tumor Segmentation Meningioma Radiotherapy (BraTS-MEN-RT) challenge aims to advance automated segmentation algorithms using the largest known multi-institutional dataset of radiotherapy planning brain MRIs with expert-annotated target labels for patients with intact or postoperative meningioma who underwent either conventional external beam radiotherapy or stereotactic radiosurgery. Each case includes a defaced 3D post-contrast T1-weighted radiotherapy planning MRI in its native acquisition space, accompanied by a single-label "target volume" representing the gross tumor volume (GTV) and any at-risk postoperative site. Target volume annotations adhere to established radiotherapy planning protocols, ensuring consistency across cases and institutions. For preoperative meningiomas, the target volume encompasses the entire GTV and associated nodular dural tail, while for postoperative cases it includes at-risk resection cavity margins as determined by the treating institution. Case annotations were reviewed and approved by expert neuroradiologists and radiation oncologists. Participating teams will develop, containerize, and evaluate automated segmentation models using this comprehensive dataset. Model performance will be assessed using an adapted lesion-wise Dice Similarity Coefficient and the 95% Hausdorff distance. The top-performing teams will be recognized at the Medical Image Computing and Computer Assisted Intervention Conference in October 2024. BraTS-MEN-RT is expected to significantly advance automated radiotherapy planning by enabling precise tumor segmentation and facilitating tailored treatment, ultimately improving patient outcomes.
Submitted 15 August, 2024; v1 submitted 28 May, 2024; originally announced May 2024.
Comments: 14 pages, 9 figures, 1 table
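The two metrics named above have compact definitions. The sketch below computes whole-mask versions as a simplification of ours: the challenge applies them lesion-wise, after matching connected components between prediction and ground truth.

    import numpy as np
    from scipy.spatial.distance import cdist

    def dice(a, b):
        inter = np.logical_and(a, b).sum()
        return 2.0 * inter / (a.sum() + b.sum() + 1e-8)

    def hd95(a, b):
        # 95th-percentile symmetric Hausdorff distance between two binary masks,
        # computed on all foreground voxels for brevity (surfaces in practice).
        pa, pb = np.argwhere(a), np.argwhere(b)
        d = cdist(pa, pb)
        return max(np.percentile(d.min(axis=1), 95),
                   np.percentile(d.min(axis=0), 95))

    gt = np.zeros((32, 32, 32), bool); gt[8:20, 8:20, 8:20] = True
    pr = np.zeros_like(gt);            pr[10:22, 9:21, 8:20] = True
    print(dice(gt, pr), hd95(gt, pr))

Taking the 95th percentile instead of the maximum makes the distance robust to a few stray voxels, which is why challenges prefer it over the plain Hausdorff distance.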
arXiv:2405.18368 (cs.CV) https://arxiv.org/abs/2405.18368
Title: The 2024 Brain Tumor Segmentation (BraTS) Challenge: Glioma Segmentation on Post-treatment MRI
Authors: Maria Correia de Verdier, Rachit Saluja, Louis Gagnon, Dominic LaBella, Ujjwall Baid, Nourel Hoda Tahon, Martha Foltyn-Dumitru, Jikai Zhang, Maram Alafif, Saif Baig, Ken Chang, Gennaro D'Anna, Lisa Deptula, Diviya Gupta, Muhammad Ammar Haider, Ali Hussain, Michael Iv, Marinos Kontzialis, Paul Manning, Farzan Moodi, Teresa Nunes, Aaron Simon, Nico Sollmann, David Vu, Maruf Adewole, et al. (60 additional authors not shown)
Abstract: Gliomas are the most common malignant primary brain tumors in adults and one of the deadliest types of cancer. There are many challenges in treatment and monitoring due to the genetic diversity and high intrinsic heterogeneity in appearance, shape, histology, and treatment response. Treatments include surgery, radiation, and systemic therapies, with magnetic resonance imaging (MRI) playing a key role in treatment planning and post-treatment longitudinal assessment. The 2024 Brain Tumor Segmentation (BraTS) challenge on post-treatment glioma MRI will provide a community standard and benchmark for state-of-the-art automated segmentation models based on the largest expert-annotated post-treatment glioma MRI dataset. Challenge competitors will develop automated segmentation models to predict four distinct tumor sub-regions consisting of enhancing tissue (ET), surrounding non-enhancing T2/fluid-attenuated inversion recovery (FLAIR) hyperintensity (SNFH), non-enhancing tumor core (NETC), and resection cavity (RC). Models will be evaluated on separate validation and test datasets using standardized performance metrics used across the BraTS 2024 cluster of challenges, including the lesion-wise Dice Similarity Coefficient and Hausdorff Distance. Models developed during this challenge will advance the field of automated MRI segmentation and contribute to their integration into clinical practice, ultimately enhancing patient care.
Submitted 28 May, 2024; originally announced May 2024.
Comments: 10 pages, 4 figures, 1 table
arXiv:2405.16460 (cs.LG, cs.AI, cs.CV) https://arxiv.org/abs/2405.16460
Title: Probabilistic Contrastive Learning with Explicit Concentration on the Hypersphere
Authors: Hongwei Bran Li, Cheng Ouyang, Tamaz Amiranashvili, Matthew S. Rosen, Bjoern Menze, Juan Eugenio Iglesias
Abstract: Self-supervised contrastive learning has predominantly adopted deterministic methods, which are not suited for environments characterized by uncertainty and noise. This paper introduces a new perspective on incorporating uncertainty into contrastive learning by embedding representations within a spherical space, inspired by the von Mises-Fisher (vMF) distribution. We introduce an unnormalized form of vMF and leverage the concentration parameter, kappa, as a direct, interpretable measure to quantify uncertainty explicitly. This approach not only provides a probabilistic interpretation of the embedding space but also offers a method to calibrate model confidence against varying levels of data corruption and characteristics. Our empirical results demonstrate that the estimated concentration parameter correlates strongly with the degree of unforeseen data corruption encountered at test time, enables failure analysis, and enhances existing out-of-distribution detection methods.
Submitted 26 May, 2024; originally announced May 2024.
Comments: technical report
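One plausible reading of the construction, sketched with our own names and choices rather than the paper's: the encoder predicts a mean direction on the unit sphere together with a concentration kappa, and kappa scales the cosine similarities entering an InfoNCE-style loss, so low-confidence samples exert a softer pull.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    torch.manual_seed(0)
    B, D, E = 8, 32, 16                    # batch, input dim, embedding dim

    class Encoder(nn.Module):
        def __init__(self):
            super().__init__()
            self.backbone = nn.Linear(D, E)
            self.kappa_head = nn.Linear(D, 1)
        def forward(self, x):
            mu = F.normalize(self.backbone(x), dim=-1)    # direction on the sphere
            kappa = F.softplus(self.kappa_head(x)) + 1.0  # positive concentration
            return mu, kappa

    enc = Encoder()
    x = torch.randn(B, D)
    # Two augmented "views" of the same inputs.
    v1 = x + 0.1 * torch.randn_like(x)
    v2 = x + 0.1 * torch.randn_like(x)
    mu1, k1 = enc(v1)
    mu2, _ = enc(v2)

    # Kappa-scaled cosine similarities; diagonal entries are the positive pairs.
    sim = k1 * (mu1 @ mu2.t())
    loss = F.cross_entropy(sim, torch.arange(B))

At test time kappa alone can be read off as a per-sample confidence score, which is what the correlation with corruption severity refers to.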
arXiv:2405.09787 (eess.IV, cs.CV, cs.LG) https://arxiv.org/abs/2405.09787
Title: Analysis of the BraTS 2023 Intracranial Meningioma Segmentation Challenge
Authors: Dominic LaBella, Ujjwal Baid, Omaditya Khanna, Shan McBurney-Lin, Ryan McLean, Pierre Nedelec, Arif Rashid, Nourel Hoda Tahon, Talissa Altes, Radhika Bhalerao, Yaseen Dhemesh, Devon Godfrey, Fathi Hilal, Scott Floyd, Anastasia Janas, Anahita Fathi Kazerooni, John Kirkpatrick, Collin Kent, Florian Kofler, Kevin Leu, Nazanin Maleki, Bjoern Menze, Maxence Pajot, Zachary J. Reitman, Jeffrey D. Rudie, et al. (96 additional authors not shown)
Abstract: We describe the design and results of the BraTS 2023 Intracranial Meningioma Segmentation Challenge. The BraTS Meningioma Challenge differed from prior BraTS Glioma challenges in that it focused on meningiomas, which are typically benign extra-axial tumors with diverse radiologic and anatomical presentation and a propensity for multiplicity. Nine participating teams each developed deep-learning automated segmentation models using image data from the largest multi-institutional systematically expert-annotated multilabel multi-sequence meningioma MRI dataset to date, which included 1000 training set cases, 141 validation set cases, and 283 hidden test set cases. Each case included T2, T2/FLAIR, T1, and T1Gd brain MRI sequences with associated tumor compartment labels delineating enhancing tumor, non-enhancing tumor, and surrounding non-enhancing T2/FLAIR hyperintensity. Participant automated segmentation models were evaluated and ranked based on a scoring system evaluating lesion-wise metrics, including the Dice similarity coefficient (DSC) and the 95% Hausdorff distance. The top-ranked team had a lesion-wise median DSC of 0.976, 0.976, and 0.964 for enhancing tumor, tumor core, and whole tumor, respectively, and a corresponding average DSC of 0.899, 0.904, and 0.871. These results serve as state-of-the-art benchmarks for future preoperative meningioma automated segmentation algorithms. Additionally, we found that 1286 of 1424 cases (90.3%) had at least one compartment voxel abutting the edge of the skull-stripped image, which warrants further investigation into optimal pre-processing and face-anonymization steps.
Submitted 15 May, 2024; originally announced May 2024.
Comments: 16 pages, 11 tables, 10 figures, MICCAI
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 11 tables, 10 figures, MICCAI</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.15009">arXiv:2404.15009</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.15009">pdf</a>, <a href="https://arxiv.org/format/2404.15009">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> The Brain Tumor Segmentation in Pediatrics (BraTS-PEDs) Challenge: Focus on Pediatrics (CBTN-CONNECT-DIPGR-ASNR-MICCAI BraTS-PEDs) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kazerooni%2C+A+F">Anahita Fathi Kazerooni</a>, <a href="/search/cs?searchtype=author&amp;query=Khalili%2C+N">Nastaran Khalili</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xinyang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Gandhi%2C+D">Deep Gandhi</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhifan Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Anwar%2C+S+M">Syed Muhammed Anwar</a>, <a href="/search/cs?searchtype=author&amp;query=Albrecht%2C+J">Jake Albrecht</a>, <a href="/search/cs?searchtype=author&amp;query=Adewole%2C+M">Maruf Adewole</a>, <a href="/search/cs?searchtype=author&amp;query=Anazodo%2C+U">Udunna Anazodo</a>, <a href="/search/cs?searchtype=author&amp;query=Anderson%2C+H">Hannah Anderson</a>, <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Bergquist%2C+T">Timothy Bergquist</a>, <a href="/search/cs?searchtype=author&amp;query=Borja%2C+A+J">Austin J. Borja</a>, <a href="/search/cs?searchtype=author&amp;query=Calabrese%2C+E">Evan Calabrese</a>, <a href="/search/cs?searchtype=author&amp;query=Chung%2C+V">Verena Chung</a>, <a href="/search/cs?searchtype=author&amp;query=Conte%2C+G">Gian-Marco Conte</a>, <a href="/search/cs?searchtype=author&amp;query=Dako%2C+F">Farouk Dako</a>, <a href="/search/cs?searchtype=author&amp;query=Eddy%2C+J">James Eddy</a>, <a href="/search/cs?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/cs?searchtype=author&amp;query=Familiar%2C+A">Ariana Familiar</a>, <a href="/search/cs?searchtype=author&amp;query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/cs?searchtype=author&amp;query=Franson%2C+A">Andrea Franson</a>, <a href="/search/cs?searchtype=author&amp;query=Gottipati%2C+A">Anurag Gottipati</a>, <a href="/search/cs?searchtype=author&amp;query=Haldar%2C+S">Shuvanjan Haldar</a>, <a href="/search/cs?searchtype=author&amp;query=Iglesias%2C+J+E">Juan Eugenio Iglesias</a> , et al. (46 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.15009v4-abstract-short" style="display: inline;"> Pediatric tumors of the central nervous system are the most common cause of cancer-related death in children. The five-year survival rate for high-grade gliomas in children is less than 20%. 
Due to their rarity, the diagnosis of these entities is often delayed, their treatment is mainly based on historic treatment concepts, and clinical trials require multi-institutional collaborations. Here we pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15009v4-abstract-full').style.display = 'inline'; document.getElementById('2404.15009v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.15009v4-abstract-full" style="display: none;"> Pediatric tumors of the central nervous system are the most common cause of cancer-related death in children. The five-year survival rate for high-grade gliomas in children is less than 20%. Due to their rarity, the diagnosis of these entities is often delayed, their treatment is mainly based on historic treatment concepts, and clinical trials require multi-institutional collaborations. Here we present the CBTN-CONNECT-DIPGR-ASNR-MICCAI BraTS-PEDs challenge, focused on pediatric brain tumors with data acquired across multiple international consortia dedicated to pediatric neuro-oncology and clinical trials. The CBTN-CONNECT-DIPGR-ASNR-MICCAI BraTS-PEDs challenge brings together clinicians and AI/imaging scientists to lead to faster development of automated segmentation techniques that could benefit clinical trials, and ultimately the care of children with brain tumors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15009v4-abstract-full').style.display = 'none'; document.getElementById('2404.15009v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2305.17033</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.03618">arXiv:2404.03618</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.03618">pdf</a>, <a href="https://arxiv.org/format/2404.03618">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DeViDe: Faceted medical knowledge for improved medical vision-language pre-training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Luo%2C+H">Haozhe Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Z">Ziyu Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Royer%2C+C">Corentin Royer</a>, <a href="/search/cs?searchtype=author&amp;query=Sekuboyina%2C+A">Anjany Sekuboyina</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.03618v1-abstract-short" style="display: inline;"> Vision-language pre-training for chest X-rays has made significant strides, primarily by utilizing paired radiographs and radiology reports. However, existing approaches often face challenges in encoding medical knowledge effectively. While radiology reports provide insights into the current disease manifestation, medical definitions (as used by contemporary methods) tend to be overly abstract, cr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.03618v1-abstract-full').style.display = 'inline'; document.getElementById('2404.03618v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.03618v1-abstract-full" style="display: none;"> Vision-language pre-training for chest X-rays has made significant strides, primarily by utilizing paired radiographs and radiology reports. However, existing approaches often face challenges in encoding medical knowledge effectively. While radiology reports provide insights into the current disease manifestation, medical definitions (as used by contemporary methods) tend to be overly abstract, creating a gap in knowledge. To address this, we propose DeViDe, a novel transformer-based method that leverages radiographic descriptions from the open web. These descriptions outline general visual characteristics of diseases in radiographs, and when combined with abstract definitions and radiology reports, provide a holistic snapshot of knowledge. DeViDe incorporates three key features for knowledge-augmented vision language alignment: First, a large-language model-based augmentation is employed to homogenise medical knowledge from diverse sources. Second, this knowledge is aligned with image information at various levels of granularity. Third, a novel projection layer is proposed to handle the complexity of aligning each image with multiple descriptions arising in a multi-label setting. 
In zero-shot settings, DeViDe performs comparably to fully supervised models on external datasets and achieves state-of-the-art results on three large-scale datasets. Additionally, fine-tuning DeViDe on four downstream tasks and six segmentation tasks showcases its superior performance across data from diverse distributions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.03618v1-abstract-full').style.display = 'none'; document.getElementById('2404.03618v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2208.04060 by other authors</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.19425">arXiv:2403.19425</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.19425">pdf</a>, <a href="https://arxiv.org/ps/2403.19425">ps</a>, <a href="https://arxiv.org/format/2403.19425">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Robust Ensemble Algorithm for Ischemic Stroke Lesion Segmentation: Generalizability and Clinical Utility Beyond the ISLES Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=de+la+Rosa%2C+E">Ezequiel de la Rosa</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&amp;query=Liew%2C+S">Sook-Lei Liew</a>, <a href="/search/cs?searchtype=author&amp;query=Hutton%2C+A">Alexandre Hutton</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Kaesmacher%2C+J">Johannes Kaesmacher</a>, <a href="/search/cs?searchtype=author&amp;query=Hanning%2C+U">Uta Hanning</a>, <a href="/search/cs?searchtype=author&amp;query=Hakim%2C+A">Arsany Hakim</a>, <a href="/search/cs?searchtype=author&amp;query=Zubal%2C+R">Richard Zubal</a>, <a href="/search/cs?searchtype=author&amp;query=Valenzuela%2C+W">Waldo Valenzuela</a>, <a href="/search/cs?searchtype=author&amp;query=Robben%2C+D">David Robben</a>, <a href="/search/cs?searchtype=author&amp;query=Sima%2C+D+M">Diana M. Sima</a>, <a href="/search/cs?searchtype=author&amp;query=Anania%2C+V">Vincenzo Anania</a>, <a href="/search/cs?searchtype=author&amp;query=Brys%2C+A">Arne Brys</a>, <a href="/search/cs?searchtype=author&amp;query=Meakin%2C+J+A">James A. 
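<li>
<p>The third feature, aligning one image with several descriptions in a multi-label setting, can be sketched as follows. The projection dimensions, cosine-similarity scoring, and binary-cross-entropy target are illustrative assumptions, not DeViDe's actual architecture.</p>
<pre><code>import torch
import torch.nn.functional as F

class MultiDescriptionHead(torch.nn.Module):
    """Score one image embedding against several description embeddings.

    Hypothetical stand-in for a projection layer handling the multi-label
    setting: each image may match any subset of K descriptions.
    """
    def __init__(self, d_img=768, d_txt=512, d_joint=256):
        super().__init__()
        self.proj_img = torch.nn.Linear(d_img, d_joint)
        self.proj_txt = torch.nn.Linear(d_txt, d_joint)

    def forward(self, img, txts):            # img: (B, d_img), txts: (K, d_txt)
        zi = F.normalize(self.proj_img(img), dim=-1)
        zt = F.normalize(self.proj_txt(txts), dim=-1)
        return zi @ zt.T                     # (B, K) cosine similarities

head = MultiDescriptionHead()
logits = head(torch.randn(4, 768), torch.randn(10, 512))
# Multi-label alignment: one independent binary target per description.
targets = torch.randint(0, 2, (4, 10)).float()
loss = F.binary_cross_entropy_with_logits(logits / 0.07, targets)
</code></pre>
</li>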
<li class="arxiv-result">
<p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.19425">arXiv:2403.19425</a> [<a href="https://arxiv.org/pdf/2403.19425">pdf</a>, <a href="https://arxiv.org/ps/2403.19425">ps</a>, <a href="https://arxiv.org/format/2403.19425">other</a>]</p>
<div class="tags is-inline-block"><span class="tag" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span></div>
<p class="title is-5 mathjax">A Robust Ensemble Algorithm for Ischemic Stroke Lesion Segmentation: Generalizability and Clinical Utility Beyond the ISLES Challenge</p>
<p class="authors"><span class="search-hit">Authors:</span> Ezequiel de la Rosa, Mauricio Reyes, Sook-Lei Liew, Alexandre Hutton, Roland Wiest, Johannes Kaesmacher, Uta Hanning, Arsany Hakim, Richard Zubal, Waldo Valenzuela, David Robben, Diana M. Sima, Vincenzo Anania, Arne Brys, James A. Meakin, Anne Mickan, Gabriel Broocks, Christian Heitkamp, Shengbo Gao, Kongming Liang, Ziji Zhang, Md Mahfuzur Rahman Siddiquee, Andriy Myronenko, Pooya Ashtari, Sabine Van Huffel, et al. (33 additional authors not shown)</p>
<p class="abstract mathjax"><strong>Abstract:</strong> Diffusion-weighted MRI (DWI) is essential for stroke diagnosis, treatment decisions, and prognosis. However, image and disease variability hinder the development of generalizable AI algorithms with clinical value. We address this gap by presenting a novel ensemble algorithm derived from the 2022 Ischemic Stroke Lesion Segmentation (ISLES) challenge. ISLES'22 provided 400 patient scans with ischemic stroke from various medical centers, facilitating the development of a wide range of cutting-edge segmentation algorithms by the research community. Through collaboration with leading teams, we combined top-performing algorithms into an ensemble model that overcomes the limitations of individual solutions. Our ensemble model achieved superior ischemic lesion detection and segmentation accuracy on our internal test set compared to individual algorithms. This accuracy generalized well across diverse image and disease variables. Furthermore, the model excelled in extracting clinical biomarkers. Notably, in a Turing-like test, neuroradiologists consistently preferred the algorithm's segmentations over manual expert efforts, highlighting increased comprehensiveness and precision. Validation using a real-world external dataset (N=1686) confirmed the model's generalizability. The algorithm's outputs also demonstrated strong correlations with clinical scores (admission NIHSS and 90-day mRS) on par with or exceeding expert-derived results, underlining its clinical relevance. This study offers two key findings. First, we present an ensemble algorithm (https://github.com/Tabrisrei/ISLES22_Ensemble) that detects and segments ischemic stroke lesions on DWI across diverse scenarios on par with expert (neuro)radiologists. Second, we show the potential for biomedical challenge outputs to extend beyond the challenge's initial objectives, demonstrating their real-world clinical applicability.</p>
<p class="is-size-7"><strong>Submitted</strong> 3 April, 2024; <strong>v1</strong> submitted 28 March, 2024; <strong>originally announced</strong> March 2024.</p>
</li>
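<li>
<p>How an ensemble "overcomes the limitations of individual solutions" can be illustrated with the simplest fusion rule, voxel-wise probability averaging; the actual ISLES'22 ensemble may weight, select, or post-process its member models differently.</p>
<pre><code>import numpy as np

def ensemble_segmentation(prob_maps, threshold=0.5):
    """Fuse per-model lesion probability maps by simple averaging.

    Generic sketch of ensembling several segmentation models: averaging
    damps the idiosyncratic errors of any single member.
    """
    stacked = np.stack(prob_maps)          # (n_models, *volume_shape)
    mean_prob = stacked.mean(axis=0)       # voxel-wise average probability
    return mean_prob &gt;= threshold          # final binary lesion mask

models = [np.random.rand(8, 8, 8) for _ in range(3)]   # toy member outputs
mask = ensemble_segmentation(models)
</code></pre>
</li>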
<li class="arxiv-result">
<p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.17834">arXiv:2403.17834</a> [<a href="https://arxiv.org/pdf/2403.17834">pdf</a>, <a href="https://arxiv.org/format/2403.17834">other</a>]</p>
<div class="tags is-inline-block"><span class="tag" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span></div>
<p class="title is-5 mathjax">Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography</p>
<p class="authors"><span class="search-hit">Authors:</span> Ibrahim Ethem Hamamci, Sezgin Er, Furkan Almas, Ayse Gulnihan Simsek, Sevval Nil Esirgun, Irem Dogan, Muhammed Furkan Dasdelen, Omer Faruk Durugol, Bastian Wittmann, Tamaz Amiranashvili, Enis Simsar, Mehmet Simsar, Emine Bensu Erdemir, Abdullah Alanbay, Anjany Sekuboyina, Berkan Lafci, Christian Bluethgen, Mehmet Kemal Ozdemir, Bjoern Menze</p>
<p class="abstract mathjax"><strong>Abstract:</strong> While computer vision has achieved tremendous success with multimodal encoding and direct textual interaction with images via chat-based large language models, similar advancements in medical imaging AI, particularly in 3D imaging, have been limited due to the scarcity of comprehensive datasets. To address this critical gap, we introduce CT-RATE, the first dataset that pairs 3D medical images with corresponding textual reports. CT-RATE comprises 25,692 non-contrast 3D chest CT scans from 21,304 unique patients. Through various reconstructions, these scans are expanded to 50,188 volumes, totaling over 14.3 million 2D slices. Each scan is accompanied by its corresponding radiology report. Leveraging CT-RATE, we develop CT-CLIP, a CT-focused contrastive language-image pretraining framework designed for broad applications without the need for task-specific training. We demonstrate how CT-CLIP can be used in two tasks: multi-abnormality detection and case retrieval. Remarkably, in multi-abnormality detection, CT-CLIP outperforms state-of-the-art fully supervised models across all key metrics, effectively eliminating the need for manual annotation. In case retrieval, it efficiently retrieves relevant cases using either image or textual queries, thereby enhancing knowledge dissemination. By combining CT-CLIP's vision encoder with a pretrained large language model, we create CT-CHAT, a vision-language foundational chat model for 3D chest CT volumes. Finetuned on over 2.7 million question-answer pairs derived from the CT-RATE dataset, CT-CHAT surpasses other multimodal AI assistants, underscoring the necessity for specialized methods in 3D medical imaging. Collectively, the open-source release of CT-RATE, CT-CLIP, and CT-CHAT not only addresses critical challenges in 3D medical imaging but also lays the groundwork for future innovations in medical AI and improved patient care.</p>
<p class="is-size-7"><strong>Submitted</strong> 16 October, 2024; <strong>v1</strong> submitted 26 March, 2024; <strong>originally announced</strong> March 2024.</p>
</li>
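<li>
<p>CT-CLIP builds on contrastive language-image pretraining, whose core objective is the symmetric InfoNCE loss sketched below. Encoder architectures, batch construction, and hyperparameters are assumptions here, not the released implementation.</p>
<pre><code>import torch
import torch.nn.functional as F

def clip_loss(img_emb, txt_emb, temperature=0.07):
    """Symmetric contrastive (InfoNCE) loss, CLIP-style.

    Matching scan/report pairs sit on the diagonal of the similarity
    matrix; all other pairs in the batch serve as negatives.
    """
    img = F.normalize(img_emb, dim=-1)
    txt = F.normalize(txt_emb, dim=-1)
    logits = img @ txt.T / temperature          # (B, B) similarity matrix
    labels = torch.arange(img.size(0))          # positives on the diagonal
    return 0.5 * (F.cross_entropy(logits, labels) +
                  F.cross_entropy(logits.T, labels))

loss = clip_loss(torch.randn(16, 512), torch.randn(16, 512))
</code></pre>
</li>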
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.14499">arXiv:2403.14499</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.14499">pdf</a>, <a href="https://arxiv.org/format/2403.14499">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Denoising Diffusion Models for 3D Healthy Brain Tissue Inpainting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Durrer%2C+A">Alicia Durrer</a>, <a href="/search/cs?searchtype=author&amp;query=Wolleb%2C+J">Julia Wolleb</a>, <a href="/search/cs?searchtype=author&amp;query=Bieder%2C+F">Florentin Bieder</a>, <a href="/search/cs?searchtype=author&amp;query=Friedrich%2C+P">Paul Friedrich</a>, <a href="/search/cs?searchtype=author&amp;query=Melie-Garcia%2C+L">Lester Melie-Garcia</a>, <a href="/search/cs?searchtype=author&amp;query=Ocampo-Pineda%2C+M">Mario Ocampo-Pineda</a>, <a href="/search/cs?searchtype=author&amp;query=Bercea%2C+C+I">Cosmin I. Bercea</a>, <a href="/search/cs?searchtype=author&amp;query=Hamamci%2C+I+E">Ibrahim E. Hamamci</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Piraud%2C+M">Marie Piraud</a>, <a href="/search/cs?searchtype=author&amp;query=Yaldizli%2C+%C3%96">脰zg眉r Yaldizli</a>, <a href="/search/cs?searchtype=author&amp;query=Granziera%2C+C">Cristina Granziera</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B+H">Bjoern H. Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Cattin%2C+P+C">Philippe C. Cattin</a>, <a href="/search/cs?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.14499v1-abstract-short" style="display: inline;"> Monitoring diseases that affect the brain&#39;s structural integrity requires automated analysis of magnetic resonance (MR) images, e.g., for the evaluation of volumetric changes. However, many of the evaluation tools are optimized for analyzing healthy tissue. To enable the evaluation of scans containing pathological tissue, it is therefore required to restore healthy tissue in the pathological areas&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.14499v1-abstract-full').style.display = 'inline'; document.getElementById('2403.14499v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.14499v1-abstract-full" style="display: none;"> Monitoring diseases that affect the brain&#39;s structural integrity requires automated analysis of magnetic resonance (MR) images, e.g., for the evaluation of volumetric changes. However, many of the evaluation tools are optimized for analyzing healthy tissue. To enable the evaluation of scans containing pathological tissue, it is therefore required to restore healthy tissue in the pathological areas. In this work, we explore and extend denoising diffusion models for consistent inpainting of healthy 3D brain tissue. 
We modify state-of-the-art 2D, pseudo-3D, and 3D methods working in the image space, as well as 3D latent and 3D wavelet diffusion models, and train them to synthesize healthy brain tissue. Our evaluation shows that the pseudo-3D model performs best regarding the structural-similarity index, peak signal-to-noise ratio, and mean squared error. To emphasize the clinical relevance, we fine-tune this model on data containing synthetic MS lesions and evaluate it on a downstream brain tissue segmentation task, whereby it outperforms the established FMRIB Software Library (FSL) lesion-filling method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.14499v1-abstract-full').style.display = 'none'; document.getElementById('2403.14499v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.07116">arXiv:2403.07116</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.07116">pdf</a>, <a href="https://arxiv.org/format/2403.07116">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Simulation-Based Segmentation of Blood Vessels in Cerebral 3D OCTA Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wittmann%2C+B">Bastian Wittmann</a>, <a href="/search/cs?searchtype=author&amp;query=Glandorf%2C+L">Lukas Glandorf</a>, <a href="/search/cs?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a>, <a href="/search/cs?searchtype=author&amp;query=Amiranashvili%2C+T">Tamaz Amiranashvili</a>, <a href="/search/cs?searchtype=author&amp;query=W%C3%A4lchli%2C+T">Thomas W盲lchli</a>, <a href="/search/cs?searchtype=author&amp;query=Razansky%2C+D">Daniel Razansky</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.07116v1-abstract-short" style="display: inline;"> Segmentation of blood vessels in murine cerebral 3D OCTA images is foundational for in vivo quantitative analysis of the effects of neurovascular disorders, such as stroke or Alzheimer&#39;s, on the vascular network. However, to accurately segment blood vessels with state-of-the-art deep learning methods, a vast amount of voxel-level annotations is required. 
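<li>
<p>A common way to turn an unconditional denoising diffusion model into an inpainter for this kind of task is to re-impose the known tissue at each reverse step (RePaint-style). The sketch below assumes a diffusers-like scheduler interface with <code>add_noise</code> and a <code>step</code> returning the denoised sample; it is not the authors' code.</p>
<pre><code>import torch

@torch.no_grad()
def inpaint_step(model, x_t, known, mask, t, scheduler):
    """One reverse-diffusion step with RePaint-style conditioning.

    `mask` is 1 where healthy tissue is known, 0 where we inpaint.
    `model` predicts noise; `scheduler` is an assumed diffusers-like API.
    """
    eps = model(x_t, t)                          # predicted noise
    x_prev = scheduler.step(eps, t, x_t)         # denoise the whole volume
    # Re-impose known healthy tissue at the matching noise level, so only
    # the masked (pathological) region is actually synthesized.
    known_t = scheduler.add_noise(known, torch.randn_like(known), t)
    return mask * known_t + (1 - mask) * x_prev
</code></pre>
</li>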
<li class="arxiv-result">
<p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.07116">arXiv:2403.07116</a> [<a href="https://arxiv.org/pdf/2403.07116">pdf</a>, <a href="https://arxiv.org/format/2403.07116">other</a>]</p>
<div class="tags is-inline-block"><span class="tag" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span></div>
<p class="title is-5 mathjax">Simulation-Based Segmentation of Blood Vessels in Cerebral 3D OCTA Images</p>
<p class="authors"><span class="search-hit">Authors:</span> Bastian Wittmann, Lukas Glandorf, Johannes C. Paetzold, Tamaz Amiranashvili, Thomas Wälchli, Daniel Razansky, Bjoern Menze</p>
<p class="abstract mathjax"><strong>Abstract:</strong> Segmentation of blood vessels in murine cerebral 3D OCTA images is foundational for in vivo quantitative analysis of the effects of neurovascular disorders, such as stroke or Alzheimer's, on the vascular network. However, to accurately segment blood vessels with state-of-the-art deep learning methods, a vast amount of voxel-level annotations is required. Since cerebral 3D OCTA images are typically plagued by artifacts and generally have a low signal-to-noise ratio, acquiring manual annotations poses an especially cumbersome and time-consuming task. To alleviate the need for manual annotations, we propose utilizing synthetic data to supervise segmentation algorithms. To this end, we extract patches from vessel graphs and transform them into synthetic cerebral 3D OCTA images paired with their matching ground truth labels by simulating the most dominant 3D OCTA artifacts. In extensive experiments, we demonstrate that our approach achieves competitive results, enabling annotation-free blood vessel segmentation in cerebral 3D OCTA images.</p>
<p class="is-size-7"><strong>Submitted</strong> 11 March, 2024; <strong>originally announced</strong> March 2024.</p>
</li>
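<li>
<p>The patch-to-synthetic-image idea can be shown in a toy form: rasterize vessel-graph edges into a label volume, then degrade it to mimic image appearance. Real OCTA simulation models artifacts (e.g., tail artifacts, shadowing) far more faithfully; all names and parameters below are illustrative.</p>
<pre><code>import numpy as np
from scipy.ndimage import gaussian_filter

def synthesize_octa(nodes, edges, shape=(64, 64, 64), noise=0.2):
    """Turn a vessel-graph patch into a (synthetic image, label) pair."""
    label = np.zeros(shape, bool)
    for i, j in edges:                        # draw each vessel segment
        pts = np.linspace(nodes[i], nodes[j], 100).round().astype(int)
        label[tuple(pts.T)] = True
    image = gaussian_filter(label.astype(float), sigma=1.0)   # blur vessels
    image += noise * np.random.rand(*shape)                   # speckle-like noise
    return image, label                       # paired training sample

nodes = np.array([[5, 5, 5], [50, 50, 50], [50, 5, 30]])
image, label = synthesize_octa(nodes, [(0, 1), (1, 2)])
</code></pre>
</li>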
<li class="arxiv-result">
<p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.06801">arXiv:2403.06801</a> [<a href="https://arxiv.org/pdf/2403.06801">pdf</a>, <a href="https://arxiv.org/format/2403.06801">other</a>]</p>
<div class="tags is-inline-block"><span class="tag" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span></div>
<p class="title is-5 mathjax">CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging</p>
<p class="authors"><span class="search-hit">Authors:</span> Ibrahim Ethem Hamamci, Sezgin Er, Bjoern Menze</p>
<p class="abstract mathjax"><strong>Abstract:</strong> Medical imaging plays a crucial role in diagnosis, with radiology reports serving as vital documentation. Automating report generation has emerged as a critical need to alleviate the workload of radiologists. While machine learning has facilitated report generation for 2D medical imaging, extending it to 3D has remained unexplored due to computational complexity and data scarcity. We introduce the first method to generate radiology reports for 3D medical imaging, specifically targeting chest CT volumes. Given the absence of comparable methods, we establish a baseline using an advanced 3D vision encoder from medical imaging to demonstrate the effectiveness of our method, which leverages a novel auto-regressive causal transformer. Furthermore, recognizing the benefits of leveraging information from previous visits, we augment CT2Rep with a cross-attention-based multi-modal fusion module and hierarchical memory, enabling the incorporation of longitudinal multimodal data. Access our code at https://github.com/ibrahimethemhamamci/CT2Rep</p>
<p class="is-size-7"><strong>Submitted</strong> 4 July, 2024; <strong>v1</strong> submitted 11 March, 2024; <strong>originally announced</strong> March 2024.</p>
</li>
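<li>
<p>An auto-regressive causal transformer implies token-by-token report decoding, sketched here with greedy search. The <code>model(volume_tokens, report)</code> interface is an assumed stand-in, not CT2Rep's actual API (see the linked repository for that).</p>
<pre><code>import torch

@torch.no_grad()
def generate_report(model, volume_tokens, bos_id, eos_id, max_len=256):
    """Greedy autoregressive decoding conditioned on 3D image features."""
    report = torch.tensor([[bos_id]])
    for _ in range(max_len):
        logits = model(volume_tokens, report)       # (1, seq, vocab) next-token logits
        next_id = logits[0, -1].argmax().view(1, 1) # most likely next token
        report = torch.cat([report, next_id], dim=1)
        if next_id.item() == eos_id:                # stop at end-of-report
            break
    return report[0, 1:]                            # strip BOS
</code></pre>
</li>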
<li class="arxiv-result">
<p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.04500">arXiv:2403.04500</a> [<a href="https://arxiv.org/pdf/2403.04500">pdf</a>, <a href="https://arxiv.org/format/2403.04500">other</a>]</p>
<div class="tags is-inline-block"><span class="tag" data-tooltip="Medical Physics">physics.med-ph</span> <span class="tag" data-tooltip="Artificial Intelligence">cs.AI</span></div>
<p class="title is-5 mathjax">A Learnable Prior Improves Inverse Tumor Growth Modeling</p>
<p class="authors"><span class="search-hit">Authors:</span> Jonas Weidner, Ivan Ezhov, Michal Balcerak, Marie-Christin Metz, Sergey Litvinov, Sebastian Kaltenbach, Leonhard Feiner, Laurin Lux, Florian Kofler, Jana Lipkova, Jonas Latz, Daniel Rueckert, Bjoern Menze, Benedikt Wiestler</p>
<p class="abstract mathjax"><strong>Abstract:</strong> Biophysical modeling, particularly involving partial differential equations (PDEs), offers significant potential for tailoring disease treatment protocols to individual patients. However, the inverse problem-solving aspect of these models presents a substantial challenge, either due to the high computational requirements of model-based approaches or the limited robustness of deep learning (DL) methods. We propose a novel framework that leverages the unique strengths of both approaches in a synergistic manner. Our method incorporates a DL ensemble for initial parameter estimation, facilitating efficient downstream evolutionary sampling initialized with this DL-based prior. We showcase the effectiveness of integrating a rapid deep-learning algorithm with a high-precision evolution strategy in estimating brain tumor cell concentrations from magnetic resonance images. The DL prior plays a pivotal role, significantly constraining the effective sampling-parameter space. This reduction results in a fivefold convergence acceleration and a Dice score of 95%.</p>
<p class="is-size-7"><strong>Submitted</strong> 6 November, 2024; <strong>v1</strong> submitted 7 March, 2024; <strong>originally announced</strong> March 2024.</p>
</li>
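<li>
<p>The interplay of a DL prior and evolutionary sampling can be sketched with a toy evolution strategy whose search is seeded at the network's parameter estimate; the paper's actual sampler and the PDE forward model are not reproduced here, and the loss function below is a placeholder.</p>
<pre><code>import numpy as np

def es_refine(loss_fn, theta_prior, sigma=0.05, pop=32, iters=50):
    """Evolutionary refinement of PDE parameters around a DL-based prior.

    The DL estimate seeds the search and, via the small step size sigma,
    effectively constrains the sampled parameter space.
    """
    best = np.asarray(theta_prior, float)
    best_loss = loss_fn(best)
    for _ in range(iters):
        cands = best + sigma * np.random.randn(pop, best.size)
        losses = np.array([loss_fn(c) for c in cands])
        idx = losses.argmin()
        if best_loss > losses[idx]:           # keep the best sample so far
            best, best_loss = cands[idx], losses[idx]
    return best

# e.g. theta = (diffusivity D, proliferation rho) of a growth model
theta = es_refine(lambda t: np.sum((t - 1.0) ** 2), np.array([0.8, 1.2]))
</code></pre>
</li>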
href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J+S">Jan S. Kirschke</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.16368v2-abstract-short" style="display: inline;"> Purpose. To present SPINEPS, an open-source deep learning approach for semantic and instance segmentation of 14 spinal structures (ten vertebra substructures, intervertebral discs, spinal cord, spinal canal, and sacrum) in whole body T2w MRI. Methods. During this HIPPA-compliant, retrospective study, we utilized the public SPIDER dataset (218 subjects, 63% female) and a subset of the German Nati&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.16368v2-abstract-full').style.display = 'inline'; document.getElementById('2402.16368v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.16368v2-abstract-full" style="display: none;"> Purpose. To present SPINEPS, an open-source deep learning approach for semantic and instance segmentation of 14 spinal structures (ten vertebra substructures, intervertebral discs, spinal cord, spinal canal, and sacrum) in whole body T2w MRI. Methods. During this HIPPA-compliant, retrospective study, we utilized the public SPIDER dataset (218 subjects, 63% female) and a subset of the German National Cohort (1423 subjects, mean age 53, 49% female) for training and evaluation. We combined CT and T2w segmentations to train models that segment 14 spinal structures in T2w sagittal scans both semantically and instance-wise. Performance evaluation metrics included Dice similarity coefficient, average symmetrical surface distance, panoptic quality, segmentation quality, and recognition quality. Statistical significance was assessed using the Wilcoxon signed-rank test. An in-house dataset was used to qualitatively evaluate out-of-distribution samples. Results. On the public dataset, our approach outperformed the baseline (instance-wise vertebra dice score 0.929 vs. 0.907, p-value&lt;0.001). Training on auto-generated annotations and evaluating on manually corrected test data from the GNC yielded global dice scores of 0.900 for vertebrae, 0.960 for intervertebral discs, and 0.947 for the spinal canal. Incorporating the SPIDER dataset during training increased these scores to 0.920, 0.967, 0.958, respectively. Conclusions. The proposed segmentation approach offers robust segmentation of 14 spinal structures in T2w sagittal images, including the spinal cord, spinal canal, intervertebral discs, endplate, sacrum, and vertebrae. The approach yields both a semantic and instance mask as output, thus being easy to utilize. This marks the first publicly available algorithm for whole spine segmentation in sagittal T2w MR imaging. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.16368v2-abstract-full').style.display = 'none'; document.getElementById('2402.16368v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">https://github.com/Hendrik-code/spineps</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.09262">arXiv:2402.09262</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.09262">pdf</a>, <a href="https://arxiv.org/format/2402.09262">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MultiMedEval: A Benchmark and a Toolkit for Evaluating Medical Vision-Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Royer%2C+C">Corentin Royer</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Sekuboyina%2C+A">Anjany Sekuboyina</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.09262v2-abstract-short" style="display: inline;"> We introduce MultiMedEval, an open-source toolkit for fair and reproducible evaluation of large, medical vision-language models (VLM). MultiMedEval comprehensively assesses the models&#39; performance on a broad array of six multi-modal tasks, conducted over 23 datasets, and spanning over 11 medical domains. The chosen tasks and performance metrics are based on their widespread adoption in the communi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.09262v2-abstract-full').style.display = 'inline'; document.getElementById('2402.09262v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.09262v2-abstract-full" style="display: none;"> We introduce MultiMedEval, an open-source toolkit for fair and reproducible evaluation of large, medical vision-language models (VLM). MultiMedEval comprehensively assesses the models&#39; performance on a broad array of six multi-modal tasks, conducted over 23 datasets, and spanning over 11 medical domains. The chosen tasks and performance metrics are based on their widespread adoption in the community and their diversity, ensuring a thorough evaluation of the model&#39;s overall generalizability. We open-source a Python toolkit (github.com/corentin-ryr/MultiMedEval) with a simple interface and setup process, enabling the evaluation of any VLM in just a few lines of code. Our goal is to simplify the intricate landscape of VLM evaluation, thus promoting fair and uniform benchmarking of future models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.09262v2-abstract-full').style.display = 'none'; document.getElementById('2402.09262v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under review at MIDL 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.17670">arXiv:2312.17670</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.17670">pdf</a>, <a href="https://arxiv.org/format/2312.17670">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Tissues and Organs">q-bio.TO</span> </div> </div> <p class="title is-5 mathjax"> Benchmarking the CoW with the TopCoW Challenge: Topology-Aware Anatomical Segmentation of the Circle of Willis for CTA and MRA </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+K">Kaiyuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Musio%2C+F">Fabio Musio</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Y">Yihui Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Juchler%2C+N">Norman Juchler</a>, <a href="/search/cs?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a>, <a href="/search/cs?searchtype=author&amp;query=Al-Maskari%2C+R">Rami Al-Maskari</a>, <a href="/search/cs?searchtype=author&amp;query=H%C3%B6her%2C+L">Luciano H枚her</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H+B">Hongwei Bran Li</a>, <a href="/search/cs?searchtype=author&amp;query=Hamamci%2C+I+E">Ibrahim Ethem Hamamci</a>, <a href="/search/cs?searchtype=author&amp;query=Sekuboyina%2C+A">Anjany Sekuboyina</a>, <a href="/search/cs?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Houjing Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Prabhakar%2C+C">Chinmay Prabhakar</a>, <a href="/search/cs?searchtype=author&amp;query=de+la+Rosa%2C+E">Ezequiel de la Rosa</a>, <a href="/search/cs?searchtype=author&amp;query=Waldmannstetter%2C+D">Diana Waldmannstetter</a>, <a href="/search/cs?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a>, <a href="/search/cs?searchtype=author&amp;query=Navarro%2C+F">Fernando Navarro</a>, <a href="/search/cs?searchtype=author&amp;query=Menten%2C+M">Martin Menten</a>, <a href="/search/cs?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Vos%2C+I">Iris Vos</a>, <a href="/search/cs?searchtype=author&amp;query=Ruigrok%2C+Y">Ynte Ruigrok</a>, <a href="/search/cs?searchtype=author&amp;query=Velthuis%2C+B">Birgitta Velthuis</a>, <a href="/search/cs?searchtype=author&amp;query=Kuijf%2C+H">Hugo Kuijf</a>, <a href="/search/cs?searchtype=author&amp;query=H%C3%A4mmerli%2C+J">Julien H盲mmerli</a> , et al. 
(59 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.17670v3-abstract-short" style="display: inline;"> The Circle of Willis (CoW) is an important network of arteries connecting major circulations of the brain. Its vascular architecture is believed to affect the risk, severity, and clinical outcome of serious neuro-vascular diseases. However, characterizing the highly variable CoW anatomy is still a manual and time-consuming expert task. The CoW is usually imaged by two angiographic imaging modaliti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.17670v3-abstract-full').style.display = 'inline'; document.getElementById('2312.17670v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.17670v3-abstract-full" style="display: none;"> The Circle of Willis (CoW) is an important network of arteries connecting major circulations of the brain. Its vascular architecture is believed to affect the risk, severity, and clinical outcome of serious neuro-vascular diseases. However, characterizing the highly variable CoW anatomy is still a manual and time-consuming expert task. The CoW is usually imaged by two angiographic imaging modalities, magnetic resonance angiography (MRA) and computed tomography angiography (CTA), but there exist limited public datasets with annotations on CoW anatomy, especially for CTA. Therefore we organized the TopCoW Challenge in 2023 with the release of an annotated CoW dataset. The TopCoW dataset was the first public dataset with voxel-level annotations for thirteen possible CoW vessel components, enabled by virtual-reality (VR) technology. It was also the first large dataset with paired MRA and CTA from the same patients. TopCoW challenge formalized the CoW characterization problem as a multiclass anatomical segmentation task with an emphasis on topological metrics. We invited submissions worldwide for the CoW segmentation task, which attracted over 140 registered participants from four continents. The top performing teams managed to segment many CoW components to Dice scores around 90%, but with lower scores for communicating arteries and rare variants. There were also topological mistakes for predictions with high Dice scores. Additional topological analysis revealed further areas for improvement in detecting certain CoW components and matching CoW variant topology accurately. TopCoW represented a first attempt at benchmarking the CoW anatomical segmentation task for MRA and CTA, both morphologically and topologically. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.17670v3-abstract-full').style.display = 'none'; document.getElementById('2312.17670v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 11 figures, 9 tables. 
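<li>
<p>A crude example of why topological checks catch errors that Dice misses: counting connected components per vessel class flags fragmented predictions even at high overlap. This Betti-0-style proxy is far simpler than the challenge's actual topology metrics and is shown only to illustrate the idea.</p>
<pre><code>import numpy as np
from scipy.ndimage import label

def topology_report(seg, n_classes):
    """Count connected components per class in a multi-class segmentation.

    An anatomically present CoW component is typically expected to form a
    single connected piece, so counts above 1 indicate fragmentation even
    when the voxel overlap (Dice) with the reference is high.
    """
    report = {}
    for c in range(1, n_classes + 1):
        _, n = label(seg == c)       # number of pieces for class c
        report[c] = n
    return report
</code></pre>
</li>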
Summary Paper for the MICCAI TopCoW 2023 Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.02608">arXiv:2312.02608</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.02608">pdf</a>, <a href="https://arxiv.org/format/2312.02608">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Panoptica -- instance-wise evaluation of 3D semantic and instance segmentation maps </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%B6ller%2C+H">Hendrik M枚ller</a>, <a href="/search/cs?searchtype=author&amp;query=Buchner%2C+J+A">Josef A. Buchner</a>, <a href="/search/cs?searchtype=author&amp;query=de+la+Rosa%2C+E">Ezequiel de la Rosa</a>, <a href="/search/cs?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/cs?searchtype=author&amp;query=Rosier%2C+M">Marcel Rosier</a>, <a href="/search/cs?searchtype=author&amp;query=Mekki%2C+I">Isra Mekki</a>, <a href="/search/cs?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a href="/search/cs?searchtype=author&amp;query=Negwer%2C+M">Moritz Negwer</a>, <a href="/search/cs?searchtype=author&amp;query=Al-Maskari%2C+R">Rami Al-Maskari</a>, <a href="/search/cs?searchtype=author&amp;query=Ert%C3%BCrk%2C+A">Ali Ert眉rk</a>, <a href="/search/cs?searchtype=author&amp;query=Vinayahalingam%2C+S">Shankeeth Vinayahalingam</a>, <a href="/search/cs?searchtype=author&amp;query=Isensee%2C+F">Fabian Isensee</a>, <a href="/search/cs?searchtype=author&amp;query=Pati%2C+S">Sarthak Pati</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J+S">Jan S. Kirschke</a>, <a href="/search/cs?searchtype=author&amp;query=Ehrlich%2C+S+K">Stefan K. Ehrlich</a>, <a href="/search/cs?searchtype=author&amp;query=Reinke%2C+A">Annika Reinke</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Piraud%2C+M">Marie Piraud</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.02608v1-abstract-short" style="display: inline;"> This paper introduces panoptica, a versatile and performance-optimized package designed for computing instance-wise segmentation quality metrics from 2D and 3D segmentation maps. 
panoptica addresses the limitations of existing metrics and provides a modular framework that complements the original intersection over union-based panoptic quality with other metrics, such as the distance metric Average Symmetric Surface Distance. The package is open-source, implemented in Python, and accompanied by comprehensive documentation and tutorials. panoptica employs a three-step metrics computation process to cover diverse use cases. The efficacy of panoptica is demonstrated on various real-world biomedical datasets, where an instance-wise evaluation is instrumental for an accurate representation of the underlying clinical task. Overall, we envision panoptica as a valuable tool facilitating in-depth evaluation of segmentation methods. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 6 figures, 3 tables</span> </p> </li>
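<p>The intersection-over-union-based panoptic quality that panoptica generalizes can be stated compactly. The sketch below is illustrative only and does not reproduce the panoptica API (all names are ours); it shows the classic PQ = SQ x RQ decomposition on two instance maps:</p>
<pre><code class="language-python">import numpy as np

def panoptic_quality(pred: np.ndarray, gt: np.ndarray, iou_thresh: float = 0.5) -> float:
    """Classic panoptic quality for two instance-labelled maps (0 = background).

    Instances are matched by IoU above iou_thresh; SQ averages the IoU of
    matched pairs, and RQ = TP / (TP + FP/2 + FN/2).
    """
    pred_ids = [i for i in np.unique(pred) if i != 0]
    gt_ids = [i for i in np.unique(gt) if i != 0]
    matched_ious, used_pred = [], set()
    for g in gt_ids:
        g_mask = gt == g
        best_iou, best_p = 0.0, None
        for p in pred_ids:
            if p in used_pred:
                continue
            p_mask = pred == p
            inter = np.logical_and(g_mask, p_mask).sum()
            union = np.logical_or(g_mask, p_mask).sum()
            iou = inter / union if union else 0.0
            if iou > best_iou:
                best_iou, best_p = iou, p
        if best_iou > iou_thresh:  # IoU above 0.5 makes the match unique
            matched_ious.append(best_iou)
            used_pred.add(best_p)
    tp = len(matched_ious)
    fp = len(pred_ids) - tp
    fn = len(gt_ids) - tp
    sq = float(np.mean(matched_ious)) if tp else 0.0
    rq = tp / (tp + 0.5 * fp + 0.5 * fn) if (tp + fp + fn) else 0.0
    return sq * rq
</code></pre>
<p>Distance-based alternatives such as the Average Symmetric Surface Distance mentioned in the abstract would slot into the same matching step in place of IoU.</p>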
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 6 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.16536">arXiv:2311.16536</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.16536">pdf</a>, <a href="https://arxiv.org/format/2311.16536">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> Personalized Predictions of Glioblastoma Infiltration: Mathematical Models, Physics-Informed Neural Networks and Multimodal Scans </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+R+Z">Ray Zirui Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/cs?searchtype=author&amp;query=Balcerak%2C+M">Michal Balcerak</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+A">Andy Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Lowengrub%2C+J+S">John S. Lowengrub</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.16536v3-abstract-short" style="display: inline;"> Predicting the infiltration of Glioblastoma (GBM) from medical MRI scans is crucial for understanding tumor growth dynamics and designing personalized radiotherapy treatment plans.Mathematical models of GBM growth can complement the data in the prediction of spatial distributions of tumor cells. However, this requires estimating patient-specific parameters of the model from clinical data, which is&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.16536v3-abstract-full').style.display = 'inline'; document.getElementById('2311.16536v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.16536v3-abstract-full" style="display: none;"> Predicting the infiltration of Glioblastoma (GBM) from medical MRI scans is crucial for understanding tumor growth dynamics and designing personalized radiotherapy treatment plans.Mathematical models of GBM growth can complement the data in the prediction of spatial distributions of tumor cells. However, this requires estimating patient-specific parameters of the model from clinical data, which is a challenging inverse problem due to limited temporal data and the limited time between imaging and diagnosis. This work proposes a method that uses Physics-Informed Neural Networks (PINNs) to estimate patient-specific parameters of a reaction-diffusion PDE model of GBM growth from a single 3D structural MRI snapshot. PINNs embed both the data and the PDE into a loss function, thus integrating theory and data. 
Key innovations include the identification and estimation of characteristic non-dimensional parameters, a pre-training step that utilizes the non-dimensional parameters, and a fine-tuning step to determine the patient-specific parameters. Additionally, the diffuse domain method is employed to handle the complex brain geometry within the PINN framework. Our method is validated on both synthetic and patient datasets, and shows promise for real-time parametric inference in the clinical setting for personalized GBM treatment. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 92-08; 92C50; 35Q92 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> J.3; J.2; I.2.6 </p> </li>
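<p>The core PINN idea described above, embedding a reaction-diffusion PDE residual and an imaging-data misfit in one loss, can be sketched in a few lines. This is a minimal illustration under assumed names (Fisher-KPP dynamics, a small MLP, learnable log-diffusivity and log-proliferation), not the authors' implementation:</p>
<pre><code class="language-python">import torch
import torch.nn as nn

net = nn.Sequential(  # u(x, y, z, t) -> tumor cell density in [0, 1]
    nn.Linear(4, 64), nn.Tanh(), nn.Linear(64, 64), nn.Tanh(),
    nn.Linear(64, 1), nn.Sigmoid(),
)
log_D = torch.zeros(1, requires_grad=True)    # patient-specific diffusivity (log scale)
log_rho = torch.zeros(1, requires_grad=True)  # patient-specific proliferation (log scale)

def pde_residual(xyzt: torch.Tensor) -> torch.Tensor:
    """Fisher-KPP residual: u_t - D * laplace(u) - rho * u * (1 - u)."""
    xyzt = xyzt.requires_grad_(True)
    u = net(xyzt)
    grads = torch.autograd.grad(u.sum(), xyzt, create_graph=True)[0]
    u_t = grads[:, 3:4]
    lap = 0.0
    for i in range(3):  # second spatial derivatives in x, y, z
        lap = lap + torch.autograd.grad(grads[:, i].sum(), xyzt, create_graph=True)[0][:, i:i+1]
    return u_t - log_D.exp() * lap - log_rho.exp() * u * (1.0 - u)

def loss_fn(obs_pts, obs_u, colloc_pts):
    # Data misfit at the single MRI snapshot + PDE residual at collocation points.
    data = ((net(obs_pts) - obs_u) ** 2).mean()
    phys = (pde_residual(colloc_pts) ** 2).mean()
    return data + phys
</code></pre>
<p>The non-dimensionalization, pre-training/fine-tuning schedule, and diffuse-domain treatment of the brain geometry described in the abstract would all modify this basic loss.</p>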
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.16863">arXiv:2308.16863</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.16863">pdf</a>, <a href="https://arxiv.org/format/2308.16863">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Self-pruning Graph Neural Network for Predicting Inflammatory Disease Activity in Multiple Sclerosis from Brain MR Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Prabhakar%2C+C">Chinmay Prabhakar</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H+B">Hongwei Bran Li</a>, <a href="/search/cs?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a>, <a href="/search/cs?searchtype=author&amp;query=Loehr%2C+T">Timo Loehr</a>, <a href="/search/cs?searchtype=author&amp;query=Niu%2C+C">Chen Niu</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BChlau%2C+M">Mark Mühlau</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2308.16863v1-abstract-full" style="display: inline;"> Multiple Sclerosis (MS) is a severe neurological disease characterized by inflammatory lesions in the central nervous system. Hence, predicting inflammatory disease activity is crucial for disease assessment and treatment. However, MS lesions can occur throughout the brain and vary in shape, size and total count among patients. The high variance in lesion load and locations makes it challenging for machine learning methods to learn a globally effective representation of whole-brain MRI scans to assess and predict disease. Technically, it is non-trivial to incorporate essential biomarkers such as lesion load or spatial proximity. Our work represents the first attempt to utilize graph neural networks (GNN) to aggregate these biomarkers for a novel global representation. We propose a two-stage MS inflammatory disease activity prediction approach. First, a 3D segmentation network detects lesions, and a self-supervised algorithm extracts their image features. Second, the detected lesions are used to build a patient graph. The lesions act as nodes in the graph and are initialized with image features extracted in the first stage. Finally, the lesions are connected based on their spatial proximity and the inflammatory disease activity prediction is formulated as a graph classification task. Furthermore, we propose a self-pruning strategy to auto-select the most critical lesions for prediction. Our proposed method outperforms the existing baseline by a large margin (AUCs of 0.67 vs. 0.61 and 0.66 vs. 0.60 for one-year and two-year inflammatory disease activity, respectively). Finally, our proposed method enjoys inherent explainability by assigning an importance score to each lesion for the overall prediction. Code is available at https://github.com/chinmay5/ms_ida.git </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li>
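<p>The graph construction this abstract describes (lesions as nodes carrying image features, edges between spatially close lesions) is simple to state concretely. A toy sketch, with the distance threshold and all names chosen by us for illustration:</p>
<pre><code class="language-python">import numpy as np

def build_lesion_graph(centroids: np.ndarray, features: np.ndarray, radius_mm: float = 20.0):
    """Nodes = detected lesions; an edge links two lesions closer than radius_mm.

    centroids: (n, 3) lesion centers in mm; features: (n, d) per-lesion embeddings
    from the self-supervised feature extractor. Returns (adjacency, features)
    ready to feed a graph classifier.
    """
    n = len(centroids)
    dists = np.linalg.norm(centroids[:, None, :] - centroids[None, :, :], axis=-1)
    adj = (radius_mm > dists) & ~np.eye(n, dtype=bool)  # no self-loops
    return adj.astype(np.float32), features
</code></pre>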
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.16139">arXiv:2308.16139</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.16139">pdf</a>, <a href="https://arxiv.org/format/2308.16139">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MedShapeNet -- A Large-Scale Dataset of 3D Medical Shapes for Computer Vision </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+J">Jianning Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Z">Zongwei Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Jiancheng Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Pepe%2C+A">Antonio Pepe</a>, <a href="/search/cs?searchtype=author&amp;query=Gsaxner%2C+C">Christina Gsaxner</a>, <a href="/search/cs?searchtype=author&amp;query=Luijten%2C+G">Gijs Luijten</a>, <a href="/search/cs?searchtype=author&amp;query=Qu%2C+C">Chongyu Qu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+T">Tiezheng Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xiaoxi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+W">Wenxuan Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wodzinski%2C+M">Marek Wodzinski</a>, <a href="/search/cs?searchtype=author&amp;query=Friedrich%2C+P">Paul Friedrich</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+K">Kangxian Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+Y">Yuan Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Ambigapathy%2C+N">Narmada Ambigapathy</a>, <a href="/search/cs?searchtype=author&amp;query=Nasca%2C+E">Enrico Nasca</a>, <a href="/search/cs?searchtype=author&amp;query=Solak%2C+N">Naida Solak</a>, <a href="/search/cs?searchtype=author&amp;query=Melito%2C+G+M">Gian Marco Melito</a>, <a href="/search/cs?searchtype=author&amp;query=Vu%2C+V+D">Viet Duc Vu</a>, <a href="/search/cs?searchtype=author&amp;query=Memon%2C+A+R">Afaque R. Memon</a>, <a href="/search/cs?searchtype=author&amp;query=Schlachta%2C+C">Christopher Schlachta</a>, <a href="/search/cs?searchtype=author&amp;query=De+Ribaupierre%2C+S">Sandrine De Ribaupierre</a>, <a href="/search/cs?searchtype=author&amp;query=Patel%2C+R">Rajnikant Patel</a>, <a href="/search/cs?searchtype=author&amp;query=Eagleson%2C+R">Roy Eagleson</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xiaojun Chen</a> , et al. (132 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2308.16139v5-abstract-full" style="display: inline;"> Prior to the deep learning era, shape was commonly used to describe objects. Nowadays, state-of-the-art (SOTA) algorithms in medical imaging are predominantly diverging from computer vision, where voxel grids, meshes, point clouds, and implicit surface models are used.
This is seen from numerous shape-related publications in premier vision conferences as well as the growing popularity of ShapeNet (about 51,300 models) and Princeton ModelNet (127,915 models). For the medical domain, we present a large collection of anatomical shapes (e.g., bones, organs, vessels) and 3D models of surgical instruments, called MedShapeNet, created to facilitate the translation of data-driven vision algorithms to medical applications and to adapt SOTA vision algorithms to medical problems. As a unique feature, we directly model the majority of shapes on the imaging data of real patients. As of today, MedShapeNet includes 23 datasets with more than 100,000 shapes that are paired with annotations (ground truth). Our data is freely accessible via a web interface and a Python application programming interface (API) and can be used for discriminative, reconstructive, and variational benchmarks as well as various applications in virtual, augmented, or mixed reality, and 3D printing. As examples, we present use cases in the fields of classification of brain tumors, facial and skull reconstructions, multi-class anatomy completion, education, and 3D printing. In the future, we will extend the data and improve the interfaces. The project pages are: https://medshapenet.ikim.nrw/ and https://github.com/Jianningli/medshapenet-feedback </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T01 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.09345">arXiv:2308.09345</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.09345">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1186/s41747-023-00385-2">10.1186/s41747-023-00385-2 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Denoising diffusion-based MRI to CT image translation enables automated spinal segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Graf%2C+R">Robert Graf</a>, <a href="/search/cs?searchtype=author&amp;query=Schmitt%2C+J">Joachim Schmitt</a>, <a href="/search/cs?searchtype=author&amp;query=Schlaeger%2C+S">Sarah Schlaeger</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%B6ller%2C+H+K">Hendrik Kristian M枚ller</a>, <a href="/search/cs?searchtype=author&amp;query=Sideri-Lampretsa%2C+V">Vasiliki Sideri-Lampretsa</a>, <a href="/search/cs?searchtype=author&amp;query=Sekuboyina%2C+A">Anjany Sekuboyina</a>, <a href="/search/cs?searchtype=author&amp;query=Krieg%2C+S+M">Sandro Manuel Krieg</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J+S">Jan Stefan Kirschke</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.09345v2-abstract-short" style="display: inline;"> Background: Automated segmentation of spinal MR images plays a vital role both scientifically and clinically. However, accurately delineating posterior spine structures presents challenges. Methods: This retrospective study, approved by the ethical committee, involved translating T1w and T2w MR image series into CT images in a total of n=263 pairs of CT/MR series. Landmark-based registration was&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.09345v2-abstract-full').style.display = 'inline'; document.getElementById('2308.09345v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.09345v2-abstract-full" style="display: none;"> Background: Automated segmentation of spinal MR images plays a vital role both scientifically and clinically. 
However, accurately delineating posterior spine structures presents challenges. Methods: This retrospective study, approved by the ethical committee, involved translating T1w and T2w MR image series into CT images in a total of n=263 pairs of CT/MR series. Landmark-based registration was performed to align image pairs. We compared 2D paired (Pix2Pix, denoising diffusion implicit models (DDIM) image mode, DDIM noise mode) and unpaired (contrastive unpaired translation, SynDiff) image-to-image translation using &#34;peak signal to noise ratio&#34; (PSNR) as quality measure. A publicly available segmentation network segmented the synthesized CT datasets, and Dice scores were evaluated on in-house test sets and the &#34;MRSpineSeg Challenge&#34; volumes. The 2D findings were extended to 3D Pix2Pix and DDIM. Results: 2D paired methods and SynDiff exhibited similar translation performance and Dice scores on paired data. DDIM image mode achieved the highest image quality. SynDiff, Pix2Pix, and DDIM image mode demonstrated similar Dice scores (0.77). For craniocaudal axis rotations, at least two landmarks per vertebra were required for registration. The 3D translation outperformed the 2D approach, resulting in improved Dice scores (0.80) and anatomically accurate segmentations in a higher resolution than the original MR image. Conclusion: Registration with two landmarks per vertebra enabled paired image-to-image translation from MR to CT and outperformed all unpaired approaches. The 3D techniques provided anatomically correct segmentations, avoiding underprediction of small structures like the spinous process. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">35 pages, 7 figures. Code and model weights are available at https://doi.org/10.5281/zenodo.8221159 and https://doi.org/10.5281/zenodo.8198697</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T99 68U10 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.1 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Eur Radiol Exp 7, 70 (2023) </p> </li>
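<p>For readers unfamiliar with the quality measure used above: PSNR has a standard closed form, shown below (a minimal sketch; data_range is the intensity span of the reference image, and the function name is ours):</p>
<pre><code class="language-python">import numpy as np

def psnr(reference: np.ndarray, synthesized: np.ndarray, data_range: float) -> float:
    """Peak signal-to-noise ratio in dB between a reference and a synthesized image."""
    mse = np.mean((reference.astype(np.float64) - synthesized.astype(np.float64)) ** 2)
    return float("inf") if mse == 0 else 10.0 * np.log10(data_range ** 2 / mse)
</code></pre>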
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">35 pages, 7 figures, Code and a model weights available https://doi.org/10.5281/zenodo.8221159 and https://doi.org/10.5281/zenodo.8198697</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T99 68U10 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.1 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Eur Radiol Exp 7, 70 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.01318">arXiv:2308.01318</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.01318">pdf</a>, <a href="https://arxiv.org/format/2308.01318">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Framing image registration as a landmark detection problem for label-noise-aware task representation (HitR) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Waldmannstetter%2C+D">Diana Waldmannstetter</a>, <a href="/search/cs?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Campi%2C+F">Francesco Campi</a>, <a href="/search/cs?searchtype=author&amp;query=Kukuljan%2C+I">Ivan Kukuljan</a>, <a href="/search/cs?searchtype=author&amp;query=Ehrlich%2C+S">Stefan Ehrlich</a>, <a href="/search/cs?searchtype=author&amp;query=Vinayahalingam%2C+S">Shankeeth Vinayahalingam</a>, <a href="/search/cs?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/cs?searchtype=author&amp;query=Chakrabarty%2C+S">Satrajit Chakrabarty</a>, <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/cs?searchtype=author&amp;query=Schwarting%2C+J">Julian Schwarting</a>, <a href="/search/cs?searchtype=author&amp;query=Metz%2C+M">Marie Metz</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J+S">Jan S. Kirschke</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Heckemann%2C+R+A">Rolf A. Heckemann</a>, <a href="/search/cs?searchtype=author&amp;query=Piraud%2C+M">Marie Piraud</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B+H">Bjoern H. Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.01318v2-abstract-short" style="display: inline;"> Accurate image registration is pivotal in biomedical image analysis, where selecting suitable registration algorithms demands careful consideration. 
While numerous algorithms are available, the evaluation metrics to assess their performance have remained relatively static. This study addresses this challenge by introducing a novel evaluation metric termed Landmark Hit Rate (HitR), which focuses on the clinical relevance of image registration accuracy. Unlike traditional metrics such as Target Registration Error, which emphasize subresolution differences, HitR considers whether registration algorithms successfully position landmarks within defined confidence zones. This paradigm shift acknowledges the inherent annotation noise in medical images, allowing for more meaningful assessments. To equip HitR with label-noise-awareness, we propose defining these confidence zones based on an Inter-rater Variance analysis. Consequently, hit rate curves are computed for varying landmark zone sizes, enabling performance measurement for a task-specific level of accuracy. Our approach offers a more realistic and meaningful assessment of image registration algorithms, reflecting their suitability for clinical and biomedical applications. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li>
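<p>The metric the abstract defines reduces to a simple computation: the fraction of registered landmarks that fall inside a confidence zone around their ground-truth positions, swept over zone sizes to obtain hit-rate curves. A minimal sketch under assumed names (not the paper's reference implementation):</p>
<pre><code class="language-python">import numpy as np

def landmark_hit_rate(pred: np.ndarray, gt: np.ndarray, zone_radius: float) -> float:
    """Fraction of landmarks landing inside a confidence zone around ground truth.

    pred, gt: (n, 3) landmark coordinates; zone_radius could be derived from an
    inter-rater variance analysis, as the abstract suggests.
    """
    dists = np.linalg.norm(pred - gt, axis=1)
    return float((zone_radius >= dists).mean())

# Hit-rate curve over a range of zone sizes (range chosen arbitrarily here):
# curve = [landmark_hit_rate(pred, gt, r) for r in np.linspace(1.0, 10.0, 10)]
</code></pre>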
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.16556">arXiv:2306.16556</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.16556">pdf</a>, <a href="https://arxiv.org/format/2306.16556">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Inter-Rater Uncertainty Quantification in Medical Image Segmentation via Rater-Specific Bayesian Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Q">Qingqiao Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+J">Jing Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+Y">Yunhao Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhangg%2C+Z">Zhiheng Zhangg</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J+S">Jan S. Kirschke</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jianguo Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H+B">Hongwei Bran Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2306.16556v2-abstract-full" style="display: inline;"> Automated medical image segmentation inherently involves a certain degree of uncertainty. One key factor contributing to this uncertainty is the ambiguity that can arise in determining the boundaries of a target region of interest, primarily due to variations in image appearance. On top of this, even among experts in the field, different opinions can emerge regarding the precise definition of specific anatomical structures. This work specifically addresses the modeling of segmentation uncertainty, known as inter-rater uncertainty. Its primary objective is to explore and analyze the variability in segmentation outcomes that can occur when multiple experts in medical imaging interpret and annotate the same images. We introduce a novel Bayesian neural network-based architecture to estimate inter-rater uncertainty in medical image segmentation. Our approach has three key advancements.
Firstly, we introduce a one-encoder-multi-decoder architecture specifically tailored for uncertainty estimation, enabling us to capture the rater-specific representation of each expert involved. Secondly, we propose Bayesian modeling for the new architecture, allowing efficient capture of the inter-rater distribution, particularly in scenarios with limited annotations. Lastly, we enhance the rater-specific representation by integrating an attention module into each decoder. This module facilitates focused and refined segmentation results for each rater. We conduct extensive evaluations using synthetic and real-world datasets to validate our technical innovations rigorously. Our method surpasses existing baseline methods in five out of seven diverse tasks on the publicly available QUBIQ dataset, considering two evaluation metrics encompassing different uncertainty aspects. Our codes, models, and the new dataset are available through our GitHub repository: https://github.com/HaoWang420/bOEMD-net </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to a journal for review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.10941">arXiv:2306.10941</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.10941">pdf</a>, <a href="https://arxiv.org/format/2306.10941">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Synthetic optical coherence tomography angiographs for detailed retinal vessel segmentation without human annotations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kreitner%2C+L">Linus Kreitner</a>, <a href="/search/cs?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a>, <a href="/search/cs?searchtype=author&amp;query=Rauch%2C+N">Nikolaus Rauch</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Hagag%2C+A+M">Ahmed M. Hagag</a>, <a href="/search/cs?searchtype=author&amp;query=Fayed%2C+A+E">Alaa E. Fayed</a>, <a href="/search/cs?searchtype=author&amp;query=Sivaprasad%2C+S">Sobha Sivaprasad</a>, <a href="/search/cs?searchtype=author&amp;query=Rausch%2C+S">Sebastian Rausch</a>, <a href="/search/cs?searchtype=author&amp;query=Weichsel%2C+J">Julian Weichsel</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B+H">Bjoern H.
Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Harders%2C+M">Matthias Harders</a>, <a href="/search/cs?searchtype=author&amp;query=Knier%2C+B">Benjamin Knier</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Menten%2C+M+J">Martin J. Menten</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2306.10941v2-abstract-full" style="display: inline;"> Optical coherence tomography angiography (OCTA) is a non-invasive imaging modality that can acquire high-resolution volumes of the retinal vasculature and aid the diagnosis of ocular, neurological and cardiac diseases. Segmenting the visible blood vessels is a common first step when extracting quantitative biomarkers from these images. Classical segmentation algorithms based on thresholding are strongly affected by image artifacts and limited signal-to-noise ratio. The use of modern, deep learning-based segmentation methods has been inhibited by a lack of large datasets with detailed annotations of the blood vessels. To address this issue, recent work has employed transfer learning, where a segmentation network is trained on synthetic OCTA images and is then applied to real data. However, the previously proposed simulations fail to faithfully model the retinal vasculature and do not provide effective domain adaptation. Because of this, current methods are unable to fully segment the retinal vasculature, in particular the smallest capillaries. In this work, we present a lightweight simulation of the retinal vascular network based on space colonization for faster and more realistic OCTA synthesis. We then introduce three contrast adaptation pipelines to decrease the domain gap between real and artificial images. We demonstrate the superior segmentation performance of our approach in extensive quantitative and qualitative experiments on three public datasets that compare our method to traditional computer vision algorithms and supervised training using human annotations. Finally, we make our entire pipeline publicly available, including the source code, pretrained models, and a large dataset of synthetic OCTA images.
</span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Currently under review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.19369">arXiv:2305.19369</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.19369">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> The Brain Tumor Segmentation (BraTS) Challenge 2023: Glioma Segmentation in Sub-Saharan Africa Patient Population (BraTS-Africa) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adewole%2C+M">Maruf Adewole</a>, <a href="/search/cs?searchtype=author&amp;query=Rudie%2C+J+D">Jeffrey D. Rudie</a>, <a href="/search/cs?searchtype=author&amp;query=Gbadamosi%2C+A">Anu Gbadamosi</a>, <a href="/search/cs?searchtype=author&amp;query=Toyobo%2C+O">Oluyemisi Toyobo</a>, <a href="/search/cs?searchtype=author&amp;query=Raymond%2C+C">Confidence Raymond</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Dong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Omidiji%2C+O">Olubukola Omidiji</a>, <a href="/search/cs?searchtype=author&amp;query=Akinola%2C+R">Rachel Akinola</a>, <a href="/search/cs?searchtype=author&amp;query=Suwaid%2C+M+A">Mohammad Abba Suwaid</a>, <a href="/search/cs?searchtype=author&amp;query=Emegoakor%2C+A">Adaobi Emegoakor</a>, <a href="/search/cs?searchtype=author&amp;query=Ojo%2C+N">Nancy Ojo</a>, <a href="/search/cs?searchtype=author&amp;query=Aguh%2C+K">Kenneth Aguh</a>, <a href="/search/cs?searchtype=author&amp;query=Kalaiwo%2C+C">Chinasa Kalaiwo</a>, <a href="/search/cs?searchtype=author&amp;query=Babatunde%2C+G">Gabriel Babatunde</a>, <a href="/search/cs?searchtype=author&amp;query=Ogunleye%2C+A">Afolabi Ogunleye</a>, <a href="/search/cs?searchtype=author&amp;query=Gbadamosi%2C+Y">Yewande Gbadamosi</a>, <a href="/search/cs?searchtype=author&amp;query=Iorpagher%2C+K">Kator Iorpagher</a>, <a href="/search/cs?searchtype=author&amp;query=Calabrese%2C+E">Evan Calabrese</a>, <a href="/search/cs?searchtype=author&amp;query=Aboian%2C+M">Mariam Aboian</a>, <a href="/search/cs?searchtype=author&amp;query=Linguraru%2C+M">Marius Linguraru</a>, <a href="/search/cs?searchtype=author&amp;query=Albrecht%2C+J">Jake Albrecht</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a>, <a
href="/search/cs?searchtype=author&amp;query=Janas%2C+A">Anastasia Janas</a>, <a href="/search/cs?searchtype=author&amp;query=LaBella%2C+D">Dominic LaBella</a> , et al. (26 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.19369v1-abstract-short" style="display: inline;"> Gliomas are the most common type of primary brain tumors. Although gliomas are relatively rare, they are among the deadliest types of cancer, with a survival rate of less than 2 years after diagnosis. Gliomas are challenging to diagnose, hard to treat and inherently resistant to conventional therapy. Years of extensive research to improve diagnosis and treatment of gliomas have decreased mortality&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.19369v1-abstract-full').style.display = 'inline'; document.getElementById('2305.19369v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.19369v1-abstract-full" style="display: none;"> Gliomas are the most common type of primary brain tumors. Although gliomas are relatively rare, they are among the deadliest types of cancer, with a survival rate of less than 2 years after diagnosis. Gliomas are challenging to diagnose, hard to treat and inherently resistant to conventional therapy. Years of extensive research to improve diagnosis and treatment of gliomas have decreased mortality rates across the Global North, while chances of survival among individuals in low- and middle-income countries (LMICs) remain unchanged and are significantly worse in Sub-Saharan Africa (SSA) populations. Long-term survival with glioma is associated with the identification of appropriate pathological features on brain MRI and confirmation by histopathology. Since 2012, the Brain Tumor Segmentation (BraTS) Challenge have evaluated state-of-the-art machine learning methods to detect, characterize, and classify gliomas. However, it is unclear if the state-of-the-art methods can be widely implemented in SSA given the extensive use of lower-quality MRI technology, which produces poor image contrast and resolution and more importantly, the propensity for late presentation of disease at advanced stages as well as the unique characteristics of gliomas in SSA (i.e., suspected higher rates of gliomatosis cerebri). Thus, the BraTS-Africa Challenge provides a unique opportunity to include brain MRI glioma cases from SSA in global efforts through the BraTS Challenge to develop and evaluate computer-aided-diagnostic (CAD) methods for the detection and characterization of glioma in resource-limited settings, where the potential for CAD tools to transform healthcare are more likely. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.19369v1-abstract-full').style.display = 'none'; document.getElementById('2305.19369v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2107.02314</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.19112">arXiv:2305.19112</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.19112">pdf</a>, <a href="https://arxiv.org/format/2305.19112">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DENTEX: An Abnormal Tooth Detection with Dental Enumeration and Diagnosis Benchmark for Panoramic X-rays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hamamci%2C+I+E">Ibrahim Ethem Hamamci</a>, <a href="/search/cs?searchtype=author&amp;query=Er%2C+S">Sezgin Er</a>, <a href="/search/cs?searchtype=author&amp;query=Simsar%2C+E">Enis Simsar</a>, <a href="/search/cs?searchtype=author&amp;query=Yuksel%2C+A+E">Atif Emre Yuksel</a>, <a href="/search/cs?searchtype=author&amp;query=Gultekin%2C+S">Sadullah Gultekin</a>, <a href="/search/cs?searchtype=author&amp;query=Ozdemir%2C+S+D">Serife Damla Ozdemir</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+K">Kaiyuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H+B">Hongwei Bran Li</a>, <a href="/search/cs?searchtype=author&amp;query=Pati%2C+S">Sarthak Pati</a>, <a href="/search/cs?searchtype=author&amp;query=Stadlinger%2C+B">Bernd Stadlinger</a>, <a href="/search/cs?searchtype=author&amp;query=Mehl%2C+A">Albert Mehl</a>, <a href="/search/cs?searchtype=author&amp;query=Gundogar%2C+M">Mustafa Gundogar</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.19112v1-abstract-short" style="display: inline;"> Panoramic X-rays are frequently used in dentistry for treatment planning, but their interpretation can be both time-consuming and prone to error. Artificial intelligence (AI) has the potential to aid in the analysis of these X-rays, thereby improving the accuracy of dental diagnoses and treatment plans. Nevertheless, designing automated algorithms for this purpose poses significant challenges, mai&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.19112v1-abstract-full').style.display = 'inline'; document.getElementById('2305.19112v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.19112v1-abstract-full" style="display: none;"> Panoramic X-rays are frequently used in dentistry for treatment planning, but their interpretation can be both time-consuming and prone to error. Artificial intelligence (AI) has the potential to aid in the analysis of these X-rays, thereby improving the accuracy of dental diagnoses and treatment plans. Nevertheless, designing automated algorithms for this purpose poses significant challenges, mainly due to the scarcity of annotated data and variations in anatomical structure. 
To address these issues, the Dental Enumeration and Diagnosis on Panoramic X-rays Challenge (DENTEX) has been organized in association with the International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI) in 2023. This challenge aims to promote the development of algorithms for multi-label detection of abnormal teeth, using three types of hierarchically annotated data: partially annotated quadrant data, partially annotated quadrant-enumeration data, and fully annotated quadrant-enumeration-diagnosis data, inclusive of four different diagnoses. In this paper, we present the results of evaluating participant algorithms on the fully annotated data, additionally investigating performance variation for quadrant, enumeration, and diagnosis labels in the detection of abnormal teeth. The provision of this annotated dataset, alongside the results of this challenge, may lay the groundwork for the creation of AI-powered tools that can offer more precise and efficient diagnosis and treatment planning in the field of dentistry. The evaluation code and datasets can be accessed at https://github.com/ibrahimethemhamamci/DENTEX </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">MICCAI 2023 Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.17096">arXiv:2305.17096</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.17096">pdf</a>, <a href="https://arxiv.org/format/2305.17096">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> GRAtt-VIS: Gated Residual Attention for Auto Rectifying Video Instance Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hannan%2C+T">Tanveer Hannan</a>, <a href="/search/cs?searchtype=author&amp;query=Koner%2C+R">Rajat Koner</a>, <a href="/search/cs?searchtype=author&amp;query=Bernhard%2C+M">Maximilian Bernhard</a>, <a href="/search/cs?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a href="/search/cs?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&amp;query=Tresp%2C+V">Volker Tresp</a>, <a href="/search/cs?searchtype=author&amp;query=Schubert%2C+M">Matthias Schubert</a>, <a href="/search/cs?searchtype=author&amp;query=Seidl%2C+T">Thomas Seidl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2305.17096v1-abstract-full" style="display: inline;"> Recent trends in Video Instance Segmentation (VIS) have seen a growing reliance on online methods to model complex and lengthy video sequences.
However, the degradation of representation and noise accumulation of the online methods, especially during occlusion and abrupt changes, pose substantial challenges. Transformer-based query propagation provides promising directions at the cost of quadratic memory attention. However, it is susceptible to the degradation of instance features due to the above-mentioned challenges and suffers from cascading effects. The detection and rectification of such errors remain largely underexplored. To this end, we introduce GRAtt-VIS, Gated Residual Attention for Video Instance Segmentation. Firstly, we leverage a Gumbel-Softmax-based gate to detect possible errors in the current frame. Next, based on the gate activation, we rectify degraded features from their past representation. Such a residual configuration alleviates the need for dedicated memory and provides a continuous stream of relevant instance features. Secondly, we propose a novel inter-instance interaction using gate activation as a mask for self-attention. This masking strategy dynamically restricts the unrepresentative instance queries in the self-attention and preserves vital information for long-term tracking. We refer to this novel combination of Gated Residual Connection and Masked Self-Attention as the GRAtt block, which can easily be integrated into the existing propagation-based framework. Further, GRAtt blocks significantly reduce the attention overhead and simplify dynamic temporal modeling. GRAtt-VIS achieves state-of-the-art performance on YouTube-VIS and the highly challenging OVIS dataset, significantly improving over previous methods. Code is available at https://github.com/Tanveer81/GRAttVIS </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 5 tables, 9 figures</span> </p> </li>
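<p>The gating-plus-residual idea described above can be illustrated in miniature. This is not the paper's GRAtt block, only a toy sketch of a Gumbel-Softmax gate deciding, per instance query, whether to keep the current feature or fall back to its past representation (all names and the two-way gate design are ours):</p>
<pre><code class="language-python">import torch
import torch.nn as nn
import torch.nn.functional as F

class GatedResidual(nn.Module):
    """Toy gated residual step for instance queries of shape (..., dim)."""

    def __init__(self, dim: int):
        super().__init__()
        self.gate = nn.Linear(dim, 2)  # logits for {keep current, use past}

    def forward(self, current: torch.Tensor, past: torch.Tensor) -> torch.Tensor:
        # hard=True yields a discrete keep/fallback decision with a soft gradient,
        # so degraded features can be rectified from the past representation.
        g = F.gumbel_softmax(self.gate(current), tau=1.0, hard=True)
        return g[..., :1] * current + g[..., 1:] * past
</code></pre>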
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 5 tables, 9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.17033">arXiv:2305.17033</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.17033">pdf</a>, <a href="https://arxiv.org/format/2305.17033">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> The Brain Tumor Segmentation (BraTS) Challenge 2023: Focus on Pediatrics (CBTN-CONNECT-DIPGR-ASNR-MICCAI BraTS-PEDs) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kazerooni%2C+A+F">Anahita Fathi Kazerooni</a>, <a href="/search/cs?searchtype=author&amp;query=Khalili%2C+N">Nastaran Khalili</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xinyang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Haldar%2C+D">Debanjan Haldar</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhifan Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Anwar%2C+S+M">Syed Muhammed Anwar</a>, <a href="/search/cs?searchtype=author&amp;query=Albrecht%2C+J">Jake Albrecht</a>, <a href="/search/cs?searchtype=author&amp;query=Adewole%2C+M">Maruf Adewole</a>, <a href="/search/cs?searchtype=author&amp;query=Anazodo%2C+U">Udunna Anazodo</a>, <a href="/search/cs?searchtype=author&amp;query=Anderson%2C+H">Hannah Anderson</a>, <a href="/search/cs?searchtype=author&amp;query=Bagheri%2C+S">Sina Bagheri</a>, <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Bergquist%2C+T">Timothy Bergquist</a>, <a href="/search/cs?searchtype=author&amp;query=Borja%2C+A+J">Austin J. Borja</a>, <a href="/search/cs?searchtype=author&amp;query=Calabrese%2C+E">Evan Calabrese</a>, <a href="/search/cs?searchtype=author&amp;query=Chung%2C+V">Verena Chung</a>, <a href="/search/cs?searchtype=author&amp;query=Conte%2C+G">Gian-Marco Conte</a>, <a href="/search/cs?searchtype=author&amp;query=Dako%2C+F">Farouk Dako</a>, <a href="/search/cs?searchtype=author&amp;query=Eddy%2C+J">James Eddy</a>, <a href="/search/cs?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/cs?searchtype=author&amp;query=Familiar%2C+A">Ariana Familiar</a>, <a href="/search/cs?searchtype=author&amp;query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/cs?searchtype=author&amp;query=Haldar%2C+S">Shuvanjan Haldar</a>, <a href="/search/cs?searchtype=author&amp;query=Iglesias%2C+J+E">Juan Eugenio Iglesias</a>, <a href="/search/cs?searchtype=author&amp;query=Janas%2C+A">Anastasia Janas</a> , et al. 
(48 additional authors not shown)
Abstract: Pediatric tumors of the central nervous system are the most common cause of cancer-related death in children. The five-year survival rate for high-grade gliomas in children is less than 20%. Due to their rarity, the diagnosis of these entities is often delayed, their treatment is mainly based on historic treatment concepts, and clinical trials require multi-institutional collaborations. The MICCAI Brain Tumor Segmentation (BraTS) Challenge is a landmark community benchmark event with a successful 12-year history of resource creation for the segmentation and analysis of adult glioma. Here we present the CBTN-CONNECT-DIPGR-ASNR-MICCAI BraTS-PEDs 2023 challenge, the first BraTS challenge focused on pediatric brain tumors, with data acquired across multiple international consortia dedicated to pediatric neuro-oncology and clinical trials. The BraTS-PEDs 2023 challenge benchmarks the development of volumetric segmentation algorithms for pediatric brain glioma using the standardized quantitative performance evaluation metrics employed across the BraTS 2023 cluster of challenges. Models trained on the BraTS-PEDs multi-parametric structural MRI (mpMRI) training data will be evaluated on separate validation and unseen test mpMRI data of high-grade pediatric glioma. The CBTN-CONNECT-DIPGR-ASNR-MICCAI BraTS-PEDs 2023 challenge brings together clinicians and AI/imaging scientists to accelerate the development of automated segmentation techniques that could benefit clinical trials and, ultimately, the care of children with brain tumors.
Submitted 23 May, 2024; v1 submitted 26 May, 2023; originally announced May 2023.
arXiv:2305.16037 [pdf, other] cs.CV
GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes
Authors: Ibrahim Ethem Hamamci, Sezgin Er, Anjany Sekuboyina, Enis Simsar, Alperen Tezcan, Ayse Gulnihan Simsek, Sevval Nil Esirgun, Furkan Almas, Irem Dogan, Muhammed Furkan Dasdelen, Chinmay Prabhakar, Hadrien Reynaud, Sarthak Pati, Christian Bluethgen, Mehmet Kemal Ozdemir, Bjoern Menze
Abstract: GenerateCT, the first approach to generating 3D medical imaging conditioned on free-form medical text prompts, incorporates a text encoder and three key components: a novel causal vision transformer for encoding 3D CT volumes, a text-image transformer for aligning CT and text tokens, and a text-conditional super-resolution diffusion model. In the absence of directly comparable methods in 3D medical imaging, we benchmarked GenerateCT against cutting-edge methods, demonstrating its superiority across all key metrics.
Importantly, we evaluated GenerateCT's clinical applications in a multi-abnormality classification task. First, we established a baseline by training a multi-abnormality classifier on our real dataset. To further assess the model's generalization to external data and its performance with unseen prompts in a zero-shot scenario, we employed an external set to train the classifier, setting an additional benchmark. We conducted two experiments in which we doubled the training datasets by synthesizing an equal number of volumes for each set using GenerateCT. The first experiment demonstrated an 11% improvement in the AP score when training the classifier jointly on real and generated volumes. The second experiment showed a 7% improvement when training on both real and generated volumes based on unseen prompts. Moreover, GenerateCT enables scaling synthetic training datasets to arbitrary sizes. As an example, we generated 100,000 3D CTs, fivefold the number in our real set, and trained the classifier exclusively on these synthetic CTs. Impressively, this classifier surpassed the performance of the one trained on all available real data by a margin of 8%. Lastly, domain experts evaluated the generated volumes, confirming a high degree of alignment with the text prompts. Our code, model weights, training data, and generated data are available at https://github.com/ibrahimethemhamamci/GenerateCT.
Submitted 12 July, 2024; v1 submitted 25 May, 2023; originally announced May 2023.
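The augmentation protocol in the abstract (train one classifier on the union of the real set and an equally sized generated set) is straightforward to express in code. Below is a hedged sketch in PyTorch; the random tensors, label dimensionality, and dataset sizes are placeholders, not the paper's actual data or generator.

```python
# Sketch of the joint real-plus-synthetic training protocol described above.
# Random tensors stand in for CT volumes and GenerateCT outputs.
import torch
from torch.utils.data import ConcatDataset, DataLoader, TensorDataset

# Stand-ins for real CT volumes with multi-abnormality labels.
real_volumes = torch.randn(100, 1, 32, 64, 64)
real_labels = torch.randint(0, 2, (100, 18)).float()
real_set = TensorDataset(real_volumes, real_labels)

# In the paper's setting these would be text-conditioned generations;
# here random tensors merely stand in for synthesized volumes.
synthetic_volumes = torch.randn(100, 1, 32, 64, 64)
synthetic_labels = torch.randint(0, 2, (100, 18)).float()
synthetic_set = TensorDataset(synthetic_volumes, synthetic_labels)

# Joint training set: the real data plus an equal number of generated volumes.
train_loader = DataLoader(ConcatDataset([real_set, synthetic_set]),
                          batch_size=4, shuffle=True)
```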
arXiv:2305.09011 [pdf, other] eess.IV cs.CV
The Brain Tumor Segmentation (BraTS) Challenge 2023: Brain MR Image Synthesis for Tumor Segmentation (BraSyn)
Authors: Hongwei Bran Li, Gian Marco Conte, Qingqiao Hu, Syed Muhammad Anwar, Florian Kofler, Ivan Ezhov, Koen van Leemput, Marie Piraud, Maria Diaz, Byrone Cole, Evan Calabrese, Jeff Rudie, Felix Meissen, Maruf Adewole, Anastasia Janas, Anahita Fathi Kazerooni, Dominic LaBella, Ahmed W. Moawad, Keyvan Farahani, James Eddy, Timothy Bergquist, Verena Chung, Russell Takeshi Shinohara, Farouk Dako, Walter Wiggins, et al. (44 additional authors not shown)
Abstract: Automated brain tumor segmentation methods have become well established and reached performance levels offering clear clinical utility. These methods typically rely on four input magnetic resonance imaging (MRI) modalities: T1-weighted images with and without contrast enhancement, T2-weighted images, and FLAIR images.
However, some sequences are often missing in clinical practice due to time constraints or image artifacts, such as patient motion. Consequently, the ability to substitute missing modalities and recover segmentation performance is highly desirable and necessary for the broader adoption of these algorithms in clinical routine. In this work, we present the establishment of the Brain MR Image Synthesis Benchmark (BraSyn), held in conjunction with the Medical Image Computing and Computer-Assisted Intervention (MICCAI) conference 2023. The primary objective of this challenge is to evaluate image synthesis methods that can realistically generate missing MRI modalities when multiple available images are provided. The ultimate aim is to facilitate automated brain tumor segmentation pipelines. The image dataset used in the benchmark is diverse and multi-modal, created through collaboration with various hospitals and research institutions.
Submitted 24 November, 2024; v1 submitted 15 May, 2023; originally announced May 2023.
Comments: Technical report of BraSyn
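The BraSyn task itself (predict one missing MRI sequence from the available ones) can be sketched in a few lines. The tiny convolutional network below is a deliberately trivial placeholder, assumed only to illustrate the input/output contract of such a synthesis model, not a competitive method.

```python
# Minimal sketch of the BraSyn task: synthesize one missing MRI modality
# from the three available ones. The model is a placeholder.
import torch
from torch import nn

synthesizer = nn.Sequential(              # 3 input modalities -> 1 synthesized
    nn.Conv3d(3, 16, 3, padding=1), nn.ReLU(),
    nn.Conv3d(16, 1, 3, padding=1),
)

# Stand-in mpMRI case: e.g. T1, T2, FLAIR present; contrast-enhanced T1 held out.
available = torch.randn(1, 3, 32, 32, 32)
target = torch.randn(1, 1, 32, 32, 32)    # the missing modality to reconstruct

prediction = synthesizer(available)
loss = nn.functional.l1_loss(prediction, target)  # common voxel-wise objective
loss.backward()
```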
arXiv:2305.08992 [pdf, other] eess.IV cs.CV cs.LG
The Brain Tumor Segmentation (BraTS) Challenge: Local Synthesis of Healthy Brain Tissue via Inpainting
Authors: Florian Kofler, Felix Meissen, Felix Steinbauer, Robert Graf, Stefan K Ehrlich, Annika Reinke, Eva Oswald, Diana Waldmannstetter, Florian Hoelzl, Izabela Horvath, Oezguen Turgut, Suprosanna Shit, Christina Bukas, Kaiyuan Yang, Johannes C. Paetzold, Ezequiel de da Rosa, Isra Mekki, Shankeeth Vinayahalingam, Hasan Kassem, Juexin Zhang, Ke Chen, Ying Weng, Alicia Durrer, Philippe C. Cattin, Julia Wolleb, et al. (81 additional authors not shown)
Abstract: A myriad of algorithms for the automatic analysis of brain MR images is available to support clinicians in their decision-making.
For brain tumor patients, the image acquisition time series typically starts with an already pathological scan. This poses problems, as many algorithms are designed to analyze healthy brains and provide no guarantees for images featuring lesions. Examples include, but are not limited to, algorithms for brain anatomy parcellation, tissue segmentation, and brain extraction. To solve this dilemma, we introduce the BraTS inpainting challenge, in which participants explore inpainting techniques to synthesize healthy brain scans from lesioned ones. This manuscript contains the task formulation, dataset, and submission procedure; it will later be updated to summarize the findings of the challenge. The challenge is organized as part of the ASNR-BraTS MICCAI challenge.
Submitted 22 September, 2024; v1 submitted 15 May, 2023; originally announced May 2023.
Comments: 14 pages, 6 figures
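One common way to formalize such an inpainting task is to reconstruct tissue only inside a given lesion mask. The sketch below assumes that formulation; the one-layer model, shapes, and the pseudo-healthy reference tensor are illustrative placeholders, not the challenge's prescribed method.

```python
# Sketch of a masked-reconstruction formulation of lesion inpainting.
# Model and data are placeholders.
import torch
from torch import nn

inpainter = nn.Conv3d(2, 1, 3, padding=1)   # (masked image, mask) in, image out

scan = torch.randn(1, 1, 32, 32, 32)        # lesioned input scan
mask = (torch.rand(1, 1, 32, 32, 32) > 0.9).float()  # 1 inside the lesion
healthy = torch.randn(1, 1, 32, 32, 32)     # pseudo-healthy reference

output = inpainter(torch.cat([scan * (1 - mask), mask], dim=1))
# Penalize the reconstruction only where tissue had to be synthesized.
loss = ((output - healthy).abs() * mask).sum() / mask.sum().clamp(min=1)
loss.backward()
```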
arXiv:2305.07642 [pdf, other] cs.CV cs.AI cs.LG stat.ML
The ASNR-MICCAI Brain Tumor Segmentation (BraTS) Challenge 2023: Intracranial Meningioma
Authors: Dominic LaBella, Maruf Adewole, Michelle Alonso-Basanta, Talissa Altes, Syed Muhammad Anwar, Ujjwal Baid, Timothy Bergquist, Radhika Bhalerao, Sully Chen, Verena Chung, Gian-Marco Conte, Farouk Dako, James Eddy, Ivan Ezhov, Devon Godfrey, Fathi Hilal, Ariana Familiar, Keyvan Farahani, Juan Eugenio Iglesias, Zhifan Jiang, Elaine Johanson, Anahita Fathi Kazerooni, Collin Kent, John Kirkpatrick, Florian Kofler, et al.
(35 additional authors not shown)
Abstract: Meningiomas are the most common primary intracranial tumor in adults and can be associated with significant morbidity and mortality. Radiologists, neurosurgeons, neuro-oncologists, and radiation oncologists rely on multiparametric MRI (mpMRI) for diagnosis, treatment planning, and longitudinal treatment monitoring; yet automated, objective, and quantitative tools for the non-invasive assessment of meningiomas on mpMRI are lacking. The BraTS meningioma 2023 challenge will provide a community standard and benchmark for state-of-the-art automated intracranial meningioma segmentation models, based on the largest expert-annotated multilabel meningioma mpMRI dataset to date. Challenge competitors will develop automated segmentation models to predict three distinct meningioma sub-regions on MRI: enhancing tumor, non-enhancing tumor core, and surrounding non-enhancing T2/FLAIR hyperintensity. Models will be evaluated on separate validation and held-out test datasets using standardized metrics employed across the BraTS 2023 series of challenges, including the Dice similarity coefficient and Hausdorff distance. The models developed during this challenge will aid the incorporation of automated meningioma MRI segmentation into clinical practice, ultimately improving the care of patients with meningioma.
Submitted 12 May, 2023; originally announced May 2023.
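The two evaluation metrics named in this abstract are standard and easy to compute per sub-region on binary masks. The sketch below uses SciPy's directed Hausdorff distance; note that BraTS challenges may apply variants (e.g. robust percentiles), so treat this as the textbook definitions only.

```python
# Dice similarity coefficient and (symmetric) Hausdorff distance on binary masks.
import numpy as np
from scipy.spatial.distance import directed_hausdorff

def dice(pred: np.ndarray, gt: np.ndarray) -> float:
    inter = np.logical_and(pred, gt).sum()
    return 2.0 * inter / (pred.sum() + gt.sum() + 1e-8)

def hausdorff(pred: np.ndarray, gt: np.ndarray) -> float:
    # Symmetric Hausdorff distance: max over the two directed distances
    # between the voxel coordinate sets of the two masks.
    p, g = np.argwhere(pred), np.argwhere(gt)
    return max(directed_hausdorff(p, g)[0], directed_hausdorff(g, p)[0])

pred = np.zeros((64, 64, 64), dtype=bool); pred[20:40, 20:40, 20:40] = True
gt = np.zeros((64, 64, 64), dtype=bool);   gt[22:42, 22:42, 22:42] = True
print(dice(pred, gt), hausdorff(pred, gt))
```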
arXiv:2304.01601 [pdf, other] eess.IV cs.CV
Primitive Simultaneous Optimization of Similarity Metrics for Image Registration
Authors: Diana Waldmannstetter, Benedikt Wiestler, Julian Schwarting, Ivan Ezhov, Marie Metz, Spyridon Bakas, Bhakti Baheti, Satrajit Chakrabarty, Daniel Rueckert, Jan S. Kirschke, Rolf A. Heckemann, Marie Piraud, Bjoern H. Menze, Florian Kofler
Abstract: Even though simultaneous optimization of similarity metrics is a standard procedure in the field of semantic segmentation, surprisingly, it is much less established for image registration. To help close this gap in the literature, we investigate, in a complex multi-modal 3D setting, whether simultaneous optimization of registration metrics, implemented here by means of primitive summation, can benefit image registration. We evaluate two challenging datasets containing collections of pre- to post-operative and pre- to intra-operative MR images of glioma.
Employing the proposed optimization, we demonstrate improved registration accuracy in terms of target registration error (TRE) on expert neuroradiologists' landmark annotations.
Submitted 12 October, 2023; v1 submitted 4 April, 2023; originally announced April 2023.
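"Primitive summation" here means the combined objective is simply the sum of the individual similarity metrics, and TRE is the mean distance between corresponding landmarks after registration. The sketch below assumes these readings; the particular metric pair (negative normalized cross-correlation plus L1) is an illustrative choice, not necessarily the terms used in the paper.

```python
# Sketch of primitive summation of similarity metrics and of TRE evaluation.
import torch

def ncc(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Global normalized cross-correlation between two images.
    a = (a - a.mean()) / (a.std() + 1e-8)
    b = (b - b.mean()) / (b.std() + 1e-8)
    return (a * b).mean()

def combined_loss(moved: torch.Tensor, fixed: torch.Tensor) -> torch.Tensor:
    # Simultaneous optimization via plain summation of two metrics.
    return -ncc(moved, fixed) + torch.nn.functional.l1_loss(moved, fixed)

def tre(warped_pts: torch.Tensor, target_pts: torch.Tensor) -> torch.Tensor:
    # Target Registration Error: mean Euclidean distance between
    # corresponding landmarks after applying the registration.
    return (warped_pts - target_pts).norm(dim=1).mean()

moved, fixed = torch.randn(1, 1, 32, 32, 32), torch.randn(1, 1, 32, 32, 32)
print(combined_loss(moved, fixed).item())
print(tre(torch.randn(10, 3), torch.randn(10, 3)).item())
```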
href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10