Search | arXiv e-print repository

Showing 1–50 of 142 results for author: Rueckert, D

Searching in archive eess (search in all archives: https://arxiv.org/search/?searchtype=author&query=Rueckert%2C+D). Results are sorted by announcement date (newest first), 50 per page; this is page 1 of 3.
1. arXiv:2410.18834 [pdf, ps, other] (eess.IV, cs.CV, cs.LG)

   Highly efficient non-rigid registration in k-space with application to cardiac Magnetic Resonance Imaging

   Authors: Aya Ghoul, Kerstin Hammernik, Andreas Lingg, Patrick Krumm, Daniel Rueckert, Sergios Gatidis, Thomas Küstner

   Abstract: In Magnetic Resonance Imaging (MRI), highly temporally resolved motion can be useful for image acquisition and reconstruction, MR-guided radiotherapy, dynamic contrast enhancement, flow and perfusion imaging, and functional assessment of motion patterns in cardiovascular, abdominal, peristaltic, fetal, or musculoskeletal imaging. Conventionally, these motion estimates are derived through image-based registration, a particularly challenging task for complex motion patterns and high dynamic resolution. The accelerated scans in such applications result in imaging artifacts that compromise the motion estimation. In this work, we propose a novel self-supervised deep learning-based framework, dubbed the Local-All Pass Attention Network (LAPANet), for non-rigid motion estimation directly from the acquired accelerated Fourier space, i.e., k-space. The proposed approach models non-rigid motion as the cumulative sum of local translational displacements, following the Local All-Pass (LAP) registration technique. LAPANet was evaluated on cardiac motion estimation across various sampling trajectories and acceleration rates. Our results demonstrate superior accuracy compared to prior conventional and deep learning-based registration methods, accommodating as few as 2 lines/frame in a Cartesian trajectory and 3 spokes/frame in a non-Cartesian trajectory. The achieved high temporal resolution (less than 5 ms) for non-rigid motion opens new avenues for motion detection, tracking and correction in dynamic and real-time MRI applications.

   Submitted 24 October, 2024; originally announced October 2024.
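To make the central modeling idea concrete (non-rigid motion built as a running sum of small local translations, per the LAP formulation the abstract cites), here is a toy numpy sketch. It is not the authors' code; the grid size, frame count, and noise scale are all invented.

```python
import numpy as np

# Toy illustration: a large non-rigid deformation expressed as the
# cumulative sum of small per-frame local translations, in the spirit
# of the Local All-Pass (LAP) idea described in the abstract.
H, W, T = 64, 64, 8                        # image grid and temporal frames

rng = np.random.default_rng(0)
# One small 2D translation per frame per location (hypothetical values).
steps = rng.normal(scale=0.2, size=(T, H, W, 2))

# Cumulative motion up to frame t is the running sum of the per-frame
# local displacements.
cumulative = np.cumsum(steps, axis=0)      # (T, H, W, 2)

print(cumulative[-1].mean(axis=(0, 1)))    # net mean displacement at last frame
```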
2. arXiv:2410.01665 [pdf] (eess.IV, cs.AI, cs.CV)

   Towards a vision foundation model for comprehensive assessment of Cardiac MRI

   Authors: Athira J Jacob, Indraneel Borgohain, Teodora Chitiboi, Puneet Sharma, Dorin Comaniciu, Daniel Rueckert

   Abstract: Cardiac magnetic resonance imaging (CMR), considered the gold standard for noninvasive cardiac assessment, is a diverse and complex modality requiring a wide variety of image processing tasks for comprehensive assessment of cardiac morphology and function. Advances in deep learning have enabled the development of state-of-the-art (SoTA) models for these tasks. However, model training is challenging due to data and label scarcity, especially in the less common imaging sequences. Moreover, each model is often trained for a specific task, with no connection between related tasks. In this work, we introduce a vision foundation model for CMR assessment, trained in a self-supervised fashion on 36 million CMR images. We then finetune the model in a supervised way for 9 clinical tasks typical to a CMR workflow, across classification, segmentation, landmark localization, and pathology detection. We demonstrate improved accuracy and robustness across all tasks, over a range of available labeled dataset sizes. We also demonstrate improved few-shot learning with fewer labeled samples, a common challenge in medical image analyses. We achieve out-of-the-box performance comparable to SoTA for most clinical tasks. The proposed method thus presents a resource-efficient, unified framework for CMR assessment, with the potential to accelerate the development of deep learning-based solutions for image analysis tasks, even with little annotated data available.

   Submitted 6 October, 2024; v1 submitted 2 October, 2024; originally announced October 2024.

   Comments: 11 pages, 3 figures, 4 tables
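A minimal sketch of the pretrain-then-finetune pattern the abstract describes: one shared (notionally pretrained) encoder feeding several lightweight task heads. The encoder and head definitions below are invented stand-ins, not the paper's architecture.

```python
import torch
import torch.nn as nn

# Hypothetical stand-in encoder; in the paper's workflow this would be
# loaded from self-supervised pretraining rather than built from scratch.
class TinyEncoder(nn.Module):
    def __init__(self, dim=128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(32, dim, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        )
    def forward(self, x):
        return self.net(x)

encoder = TinyEncoder()
heads = nn.ModuleDict({
    "disease_cls": nn.Linear(128, 4),    # e.g. pathology classification
    "phenotype_reg": nn.Linear(128, 1),  # e.g. a regression target
})
x = torch.randn(2, 1, 128, 128)          # batch of CMR slices (made-up size)
feats = encoder(x)                       # shared features for every task
print({k: h(feats).shape for k, h in heads.items()})
```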
3. arXiv:2409.09796 [pdf, other] (eess.IV, cs.CV)

   Universal Topology Refinement for Medical Image Segmentation with Polynomial Feature Synthesis

   Authors: Liu Li, Hanchun Wang, Matthew Baugh, Qiang Ma, Weitong Zhang, Cheng Ouyang, Daniel Rueckert, Bernhard Kainz

   Abstract: Although existing medical image segmentation methods provide impressive pixel-wise accuracy, they often neglect topological correctness, making their segmentations unusable for many downstream tasks. One option is to retrain such models whilst including a topology-driven loss component. However, this is computationally expensive and often impractical. A better solution would be a versatile plug-and-play topology refinement method that is compatible with any domain-specific segmentation pipeline. Directly training a post-processing model to mitigate topological errors often fails, as such models tend to be biased towards the topological errors of a target segmentation network. The diversity of these errors is confined to the information provided by a labelled training set, which is especially problematic for small datasets. Our method solves this problem by training a model-agnostic topology refinement network with synthetic segmentations that cover a wide variety of topological errors. Inspired by the Stone-Weierstrass theorem, we synthesize topology-perturbation masks with randomly sampled coefficients of orthogonal polynomial bases, which ensures a complete and unbiased representation. Practically, we verified the efficiency and effectiveness of our methods as being compatible with multiple families of polynomial bases, and show evidence that our universal plug-and-play topology refinement network outperforms both existing topology-driven learning-based and post-processing methods. We also show that combining our method with learning-based models provides an effortless add-on, which can further improve the performance of existing approaches.

   Submitted 15 September, 2024; originally announced September 2024.

   Comments: Accepted by the 27th International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI 2024)
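The mask-synthesis step can be sketched directly: draw random coefficients of an orthogonal polynomial basis, evaluate the 2D series on a grid, and threshold it into a binary perturbation mask. This toy version uses the Chebyshev basis; the degree and threshold are arbitrary choices, not the paper's settings.

```python
import numpy as np
from numpy.polynomial import chebyshev as C

rng = np.random.default_rng(42)
deg = 6
coeffs = rng.normal(size=(deg + 1, deg + 1))   # random basis coefficients

x = np.linspace(-1, 1, 128)
field = C.chebgrid2d(x, x, coeffs)             # smooth random scalar field
mask = field > field.mean()                    # binary topology-perturbation mask

# XOR-ing such masks into clean labels would create varied topological
# errors (holes, handles, broken components) for training a refiner.
print(mask.mean())
```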
4. arXiv:2409.09387 [pdf, other] (eess.IV, cs.CV)

   Estimating Neural Orientation Distribution Fields on High Resolution Diffusion MRI Scans

   Authors: Mohammed Munzer Dwedari, William Consagra, Philip Müller, Özgün Turgut, Daniel Rueckert, Yogesh Rathi

   Abstract: The Orientation Distribution Function (ODF) characterizes key brain microstructural properties and plays an important role in understanding brain structural connectivity. Recent works introduced Implicit Neural Representation (INR) based approaches to form a spatially aware continuous estimate of the ODF field and demonstrated promising results in key tasks of interest when compared to conventional discrete approaches. However, traditional INR methods face difficulties when scaling to large-scale images, such as modern ultra-high-resolution MRI scans, posing challenges in learning fine structures as well as inefficiencies in training and inference speed. In this work, we propose HashEnc, a grid-hash-encoding-based estimation of the ODF field and demonstrate its effectiveness in retaining structural and textural features. We show that HashEnc achieves a 10% enhancement in image quality while requiring 3x less computational resources than current methods. Our code can be found at https://github.com/MunzerDw/NODF-HashEnc.

   Submitted 14 September, 2024; originally announced September 2024.

   Comments: 16 pages, 8 figures, conference: Medical Image Computing and Computer-Assisted Intervention (MICCAI)
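For readers unfamiliar with grid-hash encodings, here is a single-level toy version of the idea: coordinates index a small hashed table of learnable features instead of a large dense grid. Real hash encodings (e.g. Instant-NGP, which HashEnc builds on) use multiple resolutions and interpolation; everything below, including the table size and primes, is an illustrative assumption.

```python
import torch
import torch.nn as nn

class HashGrid2D(nn.Module):
    def __init__(self, table_size=2**14, feat_dim=2, res=64):
        super().__init__()
        self.table = nn.Parameter(torch.randn(table_size, feat_dim) * 1e-2)
        self.res, self.table_size = res, table_size
        self.primes = torch.tensor([1, 2654435761])  # spatial-hashing primes

    def forward(self, xy):                  # xy in [0,1]^2, shape (N, 2)
        idx = (xy * self.res).long()        # nearest grid vertex (no interp.)
        h = (idx * self.primes).sum(-1) % self.table_size
        return self.table[h]                # (N, feat_dim) learned features

enc = HashGrid2D()
feats = enc(torch.rand(5, 2))               # features for 5 query coordinates
print(feats.shape)                          # torch.Size([5, 2])
```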
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by the 27th International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.09387">arXiv:2409.09387</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.09387">pdf</a>, <a href="https://arxiv.org/format/2409.09387">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Estimating Neural Orientation Distribution Fields on High Resolution Diffusion MRI Scans </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Dwedari%2C+M+M">Mohammed Munzer Dwedari</a>, <a href="/search/eess?searchtype=author&amp;query=Consagra%2C+W">William Consagra</a>, <a href="/search/eess?searchtype=author&amp;query=M%C3%BCller%2C+P">Philip M眉ller</a>, <a href="/search/eess?searchtype=author&amp;query=Turgut%2C+%C3%96">脰zg眉n Turgut</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Rathi%2C+Y">Yogesh Rathi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.09387v1-abstract-short" style="display: inline;"> The Orientation Distribution Function (ODF) characterizes key brain microstructural properties and plays an important role in understanding brain structural connectivity. Recent works introduced Implicit Neural Representation (INR) based approaches to form a spatially aware continuous estimate of the ODF field and demonstrated promising results in key tasks of interest when compared to conventiona&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09387v1-abstract-full').style.display = 'inline'; document.getElementById('2409.09387v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.09387v1-abstract-full" style="display: none;"> The Orientation Distribution Function (ODF) characterizes key brain microstructural properties and plays an important role in understanding brain structural connectivity. Recent works introduced Implicit Neural Representation (INR) based approaches to form a spatially aware continuous estimate of the ODF field and demonstrated promising results in key tasks of interest when compared to conventional discrete approaches. However, traditional INR methods face difficulties when scaling to large-scale images, such as modern ultra-high-resolution MRI scans, posing challenges in learning fine structures as well as inefficiencies in training and inference speed. In this work, we propose HashEnc, a grid-hash-encoding-based estimation of the ODF field and demonstrate its effectiveness in retaining structural and textural features. We show that HashEnc achieves a 10% enhancement in image quality while requiring 3x less computational resources than current methods. 
6. arXiv:2407.19274 [pdf, other] (cs.CV, eess.IV)

   Mamba? Catch The Hype Or Rethink What Really Helps for Image Registration

   Authors: Bailiang Jian, Jiazhen Pan, Morteza Ghahremani, Daniel Rueckert, Christian Wachinger, Benedikt Wiestler

   Abstract: Our findings indicate that adopting "advanced" computational elements fails to significantly improve registration accuracy. Instead, well-established registration-specific designs offer fair improvements, enhancing results by a marginal 1.5% over the baseline. Our findings emphasize the importance of rigorous, unbiased evaluation and contribution disentanglement of all low- and high-level registration components, rather than simply following the computer vision trends with "more advanced" computational blocks. We advocate for simpler yet effective solutions and novel evaluation metrics that go beyond conventional registration accuracy, warranting further research across diverse organs and modalities. The code is available at https://github.com/BailiangJ/rethink-reg.

   Submitted 27 July, 2024; originally announced July 2024.

   Comments: WBIR 2024 Workshop on Biomedical Imaging Registration
7. arXiv:2407.03863 [pdf, other] (eess.IV, cs.AI, cs.CV)

   Unsupervised Analysis of Alzheimer's Disease Signatures using 3D Deformable Autoencoders

   Authors: Mehmet Yigit Avci, Emily Chan, Veronika Zimmer, Daniel Rueckert, Benedikt Wiestler, Julia A. Schnabel, Cosmin I. Bercea

   Abstract: With the increasing incidence of neurodegenerative diseases such as Alzheimer's Disease (AD), there is a need for further research that enhances detection and monitoring of the diseases. We present MORPHADE (Morphological Autoencoders for Alzheimer's Disease Detection), a novel unsupervised learning approach which uses deformations to allow the analysis of 3D T1-weighted brain images. To the best of our knowledge, this is the first use of deformations with deep unsupervised learning to not only detect, but also localize and assess the severity of structural changes in the brain due to AD. We obtain markedly higher anomaly scores in clinically important areas of the brain in subjects with AD compared to healthy controls, showcasing that our method is able to effectively locate AD-related atrophy. We additionally observe a visual correlation between the severity of atrophy highlighted in our anomaly maps and medial temporal lobe atrophy scores evaluated by a clinical expert. Finally, our method achieves an AUROC of 0.80 in detecting AD, outperforming several supervised and unsupervised baselines. We believe our framework shows promise as a tool towards improved understanding, monitoring and detection of AD. To support further research and application, we have made our code publicly available at github.com/ci-ber/MORPHADE.

   Submitted 4 July, 2024; originally announced July 2024.

   Comments: 11 pages, 5 figures
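A crude way to picture deformation-based anomaly scoring, as described above: given a dense displacement field that morphs a subject image towards a reference (in MORPHADE, produced by a deformable autoencoder), large local deformations flag candidate atrophy. The random field and the scoring rule below are placeholders for intuition only.

```python
import numpy as np

# Stand-in dense 3D displacement field (voxel -> 3-vector); in the paper
# this would come from the trained deformable autoencoder, not from noise.
field = np.random.default_rng(1).normal(scale=0.5, size=(64, 64, 64, 3))

anomaly_map = np.linalg.norm(field, axis=-1)   # voxel-wise displacement norm
score = anomaly_map.mean()                     # crude subject-level score
print(anomaly_map.shape, score)
```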
8. arXiv:2407.03034 [pdf, ps, other] (eess.IV, cs.AI)

   Attention Incorporated Network for Sharing Low-rank, Image and K-space Information during MR Image Reconstruction to Achieve Single Breath-hold Cardiac Cine Imaging

   Authors: Siying Xu, Kerstin Hammernik, Andreas Lingg, Jens Kuebler, Patrick Krumm, Daniel Rueckert, Sergios Gatidis, Thomas Kuestner

   Abstract: Cardiac Cine Magnetic Resonance Imaging (MRI) provides an accurate assessment of heart morphology and function in clinical practice. However, MRI requires long acquisition times, with recent deep learning-based methods showing great promise to accelerate imaging and enhance reconstruction quality. Existing networks exhibit some common limitations that constrain further acceleration possibilities, including single-domain learning, reliance on a single regularization term, and equal feature contribution. To address these limitations, we propose to embed information from multiple domains, including low-rank, image, and k-space, in a novel deep learning network for MRI reconstruction, which we denote as A-LIKNet. A-LIKNet adopts a parallel-branch structure, enabling independent learning in the k-space and image domains. Coupled information sharing layers realize the information exchange between domains. Furthermore, we introduce attention mechanisms into the network to assign greater weights to more critical coils or important temporal frames. Training and testing were conducted on an in-house dataset, including 91 cardiovascular patients and 38 healthy subjects scanned with 2D cardiac Cine using retrospective undersampling. Additionally, we evaluated A-LIKNet on real-time 8x prospectively undersampled data from the OCMR dataset. The results demonstrate that our proposed A-LIKNet outperforms existing methods and provides high-quality reconstructions. The network can effectively reconstruct highly retrospectively undersampled dynamic MR images at up to 24x acceleration, indicating its potential for single breath-hold imaging.

   Submitted 3 July, 2024; originally announced July 2024.
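A schematic of the parallel-branch idea, in my own toy form rather than the published architecture: one branch holds an image estimate, one holds a k-space estimate, and a sharing step exchanges information by transforming each branch's estimate into the other domain and blending. The blend weight alpha is an invented parameter.

```python
import torch
import torch.fft as fft

def sharing_layer(img, ksp, alpha=0.5):
    # View each branch's estimate in the other domain via (inverse) FFT,
    # then blend: a crude stand-in for learned information-sharing layers.
    img_from_ksp = fft.ifft2(ksp)
    ksp_from_img = fft.fft2(img)
    img_new = alpha * img + (1 - alpha) * img_from_ksp
    ksp_new = alpha * ksp + (1 - alpha) * ksp_from_img
    return img_new, ksp_new

img = torch.randn(1, 1, 64, 64, dtype=torch.complex64)  # image-branch state
ksp = fft.fft2(img)                                     # k-space-branch state
img2, ksp2 = sharing_layer(img, ksp)
print(img2.shape, ksp2.shape)
```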
9. arXiv:2407.00186 [pdf] (eess.IV, cs.CV, cs.LG)

   DCSM 2.0: Deep Conditional Shape Models for Data Efficient Segmentation

   Authors: Athira J Jacob, Puneet Sharma, Daniel Rueckert

   Abstract: Segmentation is often the first step in many medical image analysis workflows. Deep learning approaches, while giving state-of-the-art accuracies, are data intensive and do not scale well to low data regimes. We introduce Deep Conditional Shape Models 2.0, which uses an edge detector, along with an implicit shape function conditioned on edge maps, to leverage cross-modality shape information. The shape function is trained exclusively on a source domain (contrasted CT) and applied to the target domain of interest (3D echocardiography). We demonstrate data efficiency in the target domain by varying the amounts of training data used in the edge detection stage. We observe that DCSM 2.0 outperforms the baseline at all data levels in terms of Hausdorff distance, while using 50% or less of the training data in terms of average mesh distance, and at 10% or less of the data with the dice coefficient. The method scales well to low data regimes, with gains of up to 5% in dice coefficient, 2.58 mm in average surface distance and 21.02 mm in Hausdorff distance when using just 2% (22 volumes) of the training data.

   Submitted 28 June, 2024; originally announced July 2024.

   Comments: Best oral paper award at ISBI 2024
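The abstract's "implicit shape function conditioned on edge maps" can be pictured as an MLP that maps a query coordinate plus features sampled from an edge map to an inside/outside value. The class, names, and sizes below are invented for illustration; the paper's conditioning may differ.

```python
import torch
import torch.nn as nn

class EdgeConditionedShapeFn(nn.Module):
    def __init__(self, edge_feat=8, hidden=64):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(3 + edge_feat, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),          # occupancy / signed distance value
        )

    def forward(self, xyz, edge_features):
        # Condition each query point on locally sampled edge-map features.
        return self.mlp(torch.cat([xyz, edge_features], dim=-1))

f = EdgeConditionedShapeFn()
occ = f(torch.rand(1024, 3), torch.rand(1024, 8))
print(occ.shape)   # (1024, 1): one value per query point
```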
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.03034">arXiv:2407.03034</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.03034">pdf</a>, <a href="https://arxiv.org/ps/2407.03034">ps</a>, <a href="https://arxiv.org/format/2407.03034">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Attention Incorporated Network for Sharing Low-rank, Image and K-space Information during MR Image Reconstruction to Achieve Single Breath-hold Cardiac Cine Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Xu%2C+S">Siying Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a href="/search/eess?searchtype=author&amp;query=Lingg%2C+A">Andreas Lingg</a>, <a href="/search/eess?searchtype=author&amp;query=Kuebler%2C+J">Jens Kuebler</a>, <a href="/search/eess?searchtype=author&amp;query=Krumm%2C+P">Patrick Krumm</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Gatidis%2C+S">Sergios Gatidis</a>, <a href="/search/eess?searchtype=author&amp;query=Kuestner%2C+T">Thomas Kuestner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.03034v1-abstract-short" style="display: inline;"> Cardiac Cine Magnetic Resonance Imaging (MRI) provides an accurate assessment of heart morphology and function in clinical practice. However, MRI requires long acquisition times, with recent deep learning-based methods showing great promise to accelerate imaging and enhance reconstruction quality. Existing networks exhibit some common limitations that constrain further acceleration possibilities,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03034v1-abstract-full').style.display = 'inline'; document.getElementById('2407.03034v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.03034v1-abstract-full" style="display: none;"> Cardiac Cine Magnetic Resonance Imaging (MRI) provides an accurate assessment of heart morphology and function in clinical practice. However, MRI requires long acquisition times, with recent deep learning-based methods showing great promise to accelerate imaging and enhance reconstruction quality. Existing networks exhibit some common limitations that constrain further acceleration possibilities, including single-domain learning, reliance on a single regularization term, and equal feature contribution. To address these limitations, we propose to embed information from multiple domains, including low-rank, image, and k-space, in a novel deep learning network for MRI reconstruction, which we denote as A-LIKNet. A-LIKNet adopts a parallel-branch structure, enabling independent learning in the k-space and image domain. 
11. arXiv:2406.06537 [pdf, other] (eess.IV, cs.AI, cs.CV, cs.LG)

   Interactive Generation of Laparoscopic Videos with Diffusion Models

   Authors: Ivan Iliash, Simeon Allmendinger, Felix Meissen, Niklas Kühl, Daniel Rückert

   Abstract: Generative AI in general, and synthetic visual data generation in particular, hold much promise for benefiting surgical training by providing photorealism to simulation environments. Current training methods primarily rely on reading materials and observing live surgeries, which can be time-consuming and impractical. In this work, we take a significant step towards improving the training process. Specifically, we use diffusion models in combination with a zero-shot video diffusion method to interactively generate realistic laparoscopic images and videos by specifying a surgical action through text and guiding the generation with tool positions through segmentation masks. We demonstrate the performance of our approach using the publicly available Cholec dataset family and evaluate the fidelity and factual correctness of our generated images using a surgical action recognition model as well as the pixel-wise F1-score for the spatial control of tool generation. We achieve an FID of 38.097 and an F1-score of 0.71.

   Submitted 23 April, 2024; originally announced June 2024.

   Comments: 7 pages, 4 figures
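The conditioning interface described above (text for the surgical action, segmentation masks for tool positions) can be sketched as a denoiser that takes the mask as an extra input channel and the text as an embedding. This is an invented miniature, not the paper's model or its guidance scheme.

```python
import torch
import torch.nn as nn

class CondDenoiser(nn.Module):
    def __init__(self, text_dim=32):
        super().__init__()
        self.conv = nn.Conv2d(3 + 1, 3, 3, padding=1)  # image + mask channel
        self.text_proj = nn.Linear(text_dim, 3)

    def forward(self, noisy, mask, text_emb):
        # Spatial control via the mask channel, semantic control via a
        # FiLM-like per-channel bias from the text embedding.
        h = self.conv(torch.cat([noisy, mask], dim=1))
        return h + self.text_proj(text_emb)[:, :, None, None]

model = CondDenoiser()
out = model(torch.randn(1, 3, 64, 64),   # noisy laparoscopic frame
            torch.zeros(1, 1, 64, 64),   # tool-position mask
            torch.randn(1, 32))          # embedded action text
print(out.shape)
```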
12. arXiv:2406.04769 [pdf, other] (eess.IV, cs.CV, cs.LG)

   Diffusion-based Generative Image Outpainting for Recovery of FOV-Truncated CT Images

   Authors: Michelle Espranita Liman, Daniel Rueckert, Florian J. Fintelmann, Philip Müller

   Abstract: Field-of-view (FOV) recovery of truncated chest CT scans is crucial for accurate body composition analysis, which involves quantifying skeletal muscle and subcutaneous adipose tissue (SAT) on CT slices. This, in turn, enables disease prognostication. Here, we present a method for recovering truncated CT slices using generative image outpainting. We train a diffusion model and apply it to truncated CT slices generated by simulating a small FOV. Our model reliably recovers the truncated anatomy and outperforms the previous state-of-the-art despite being trained on 87% less data.

   Submitted 26 September, 2024; v1 submitted 7 June, 2024; originally announced June 2024.

   Comments: Shared last authorship: Florian J. Fintelmann and Philip Müller
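Simulating a small FOV, as the training setup above describes, amounts to discarding voxels outside a circular field of view so the outpainting model can learn to restore them. The radius and the fill value are assumptions in this sketch.

```python
import numpy as np

ct = np.random.default_rng(0).normal(size=(512, 512))   # stand-in CT slice

yy, xx = np.mgrid[:512, :512]
r = np.hypot(yy - 255.5, xx - 255.5)
small_fov = r < 180                  # simulated truncated scanner FOV (assumed radius)

truncated = np.where(small_fov, ct, -1024.0)   # outside FOV -> air-like HU value
# (truncated, ct) now form an input/target pair for an outpainting model.
print(truncated[small_fov].size, truncated.size)
```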
In this work, we present CoSeg, a learni&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.12650v1-abstract-full').style.display = 'inline'; document.getElementById('2406.12650v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.12650v1-abstract-full" style="display: none;"> Existing learning-based cortical surface reconstruction approaches heavily rely on the supervision of pseudo ground truth (pGT) cortical surfaces for training. Such pGT surfaces are generated by traditional neuroimage processing pipelines, which are time consuming and difficult to generalize well to low-resolution brain MRI, e.g., from fetuses and neonates. In this work, we present CoSeg, a learning-based cortical surface reconstruction framework weakly supervised by brain segmentations without the need for pGT surfaces. CoSeg introduces temporal attention networks to learn time-varying velocity fields from brain MRI for diffeomorphic surface deformations, which fit an initial surface to target cortical surfaces within only 0.11 seconds for each brain hemisphere. A weakly supervised loss is designed to reconstruct pial surfaces by inflating the white surface along the normal direction towards the boundary of the cortical gray matter segmentation. This alleviates partial volume effects and encourages the pial surface to deform into deep and challenging cortical sulci. We evaluate CoSeg on 1,113 adult brain MRI at 1mm and 2mm resolution. CoSeg achieves superior geometric and morphological accuracy compared to existing learning-based approaches. We also verify that CoSeg can extract high-quality cortical surfaces from fetal brain MRI on which traditional pipelines fail to produce acceptable results. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.12650v1-abstract-full').style.display = 'none'; document.getElementById('2406.12650v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by the 27th International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.06537">arXiv:2406.06537</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.06537">pdf</a>, <a href="https://arxiv.org/format/2406.06537">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Interactive Generation of Laparoscopic Videos with Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Iliash%2C+I">Ivan Iliash</a>, <a href="/search/eess?searchtype=author&amp;query=Allmendinger%2C+S">Simeon Allmendinger</a>, <a href="/search/eess?searchtype=author&amp;query=Meissen%2C+F">Felix Meissen</a>, <a href="/search/eess?searchtype=author&amp;query=K%C3%BChl%2C+N">Niklas K眉hl</a>, <a href="/search/eess?searchtype=author&amp;query=R%C3%BCckert%2C+D">Daniel R眉ckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.06537v1-abstract-short" style="display: inline;"> Generative AI, in general, and synthetic visual data generation, in specific, hold much promise for benefiting surgical training by providing photorealism to simulation environments. Current training methods primarily rely on reading materials and observing live surgeries, which can be time-consuming and impractical. In this work, we take a significant step towards improving the training process.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.06537v1-abstract-full').style.display = 'inline'; document.getElementById('2406.06537v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.06537v1-abstract-full" style="display: none;"> Generative AI, in general, and synthetic visual data generation, in specific, hold much promise for benefiting surgical training by providing photorealism to simulation environments. Current training methods primarily rely on reading materials and observing live surgeries, which can be time-consuming and impractical. In this work, we take a significant step towards improving the training process. Specifically, we use diffusion models in combination with a zero-shot video diffusion method to interactively generate realistic laparoscopic images and videos by specifying a surgical action through text and guiding the generation with tool positions through segmentation masks. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.04769">arXiv:2406.04769</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.04769">pdf</a>, <a href="https://arxiv.org/format/2406.04769">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Diffusion-based Generative Image Outpainting for Recovery of FOV-Truncated CT Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Liman%2C+M+E">Michelle Espranita Liman</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Fintelmann%2C+F+J">Florian J. Fintelmann</a>, <a href="/search/eess?searchtype=author&amp;query=M%C3%BCller%2C+P">Philip Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2406.04769v2-abstract-full"> Field-of-view (FOV) recovery of truncated chest CT scans is crucial for accurate body composition analysis, which involves quantifying skeletal muscle and subcutaneous adipose tissue (SAT) on CT slices. This, in turn, enables disease prognostication. Here, we present a method for recovering truncated CT slices using generative image outpainting. We train a diffusion model and apply it to truncated CT slices generated by simulating a small FOV. Our model reliably recovers the truncated anatomy and outperforms the previous state-of-the-art despite being trained on 87% less data. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Shared last authorship: Florian J. Fintelmann and Philip Müller</span> </p> </li>
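<p class="is-size-7">One common way to turn a plain diffusion model into an outpainter is to re-impose the known in-FOV content at every denoising step and let the model fill only the truncated region (the RePaint-style trick). The step below is an illustrative sketch under that assumption, not necessarily the authors' exact scheme.</p>
<pre>
# Hedged sketch: one reverse-diffusion step with known-region re-imposition.
import torch

@torch.no_grad()
def outpaint_step(x_t, known, fov_mask, t, eps_model, betas, alphas, abar):
    # denoise the full image one step
    eps = eps_model(x_t, torch.tensor([t]))
    mean = (x_t - betas[t] / (1 - abar[t]).sqrt() * eps) / alphas[t].sqrt()
    x_prev = mean if t == 0 else mean + betas[t].sqrt() * torch.randn_like(x_t)
    # re-impose the known anatomy at the matching noise level
    if t > 0:
        known_t = abar[t - 1].sqrt() * known + (1 - abar[t - 1]).sqrt() * torch.randn_like(known)
    else:
        known_t = known
    # fov_mask = 1 inside the original (trusted) field of view
    return fov_mask * known_t + (1 - fov_mask) * x_prev
</pre>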
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.00329">arXiv:2406.00329</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.00329">pdf</a>, <a href="https://arxiv.org/format/2406.00329">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Whole Heart 3D+T Representation Learning Through Sparse 2D Cardiac MR Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Y">Yundi Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+C">Chen Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a href="/search/eess?searchtype=author&amp;query=Starck%2C+S">Sophie Starck</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Pan%2C+J">Jiazhen Pan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2406.00329v2-abstract-full"> Cardiac Magnetic Resonance (CMR) imaging serves as the gold standard for evaluating cardiac morphology and function. Typically, a multi-view CMR stack, covering short-axis (SA) and 2/3/4-chamber long-axis (LA) views, is acquired for a thorough cardiac assessment. However, efficiently streamlining the complex, high-dimensional 3D+T CMR data and distilling a compact, coherent representation remains a challenge. In this work, we introduce a whole-heart self-supervised learning framework that utilizes masked imaging modeling to automatically uncover the correlations between spatial and temporal patches throughout the cardiac stacks. This process facilitates the generation of meaningful and well-clustered heart representations without relying on the traditionally required, and often costly, labeled data. The learned heart representation can be directly used for various downstream tasks. Furthermore, our method demonstrates remarkable robustness, ensuring consistent representations even when certain CMR planes are missing or flawed. We train our model on 14,000 unlabeled CMR scans from UK BioBank and evaluate it on 1,000 annotated scans. The proposed method demonstrates superior performance to baselines in tasks that demand comprehensive 3D+T cardiac information, e.g. cardiac phenotype (ejection fraction and ventricle volume) prediction and multi-plane/multi-frame CMR segmentation, highlighting its effectiveness in extracting comprehensive cardiac features that are both anatomically and pathologically relevant. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li>
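<p class="is-size-7">The core of masked imaging modeling is simple: hide a large fraction of patch tokens and train the network to reconstruct them. The MAE-style loss below illustrates the mechanism; <code>encoder</code> and <code>decoder</code> (and the decoder's signature) are placeholders, not the paper's modules.</p>
<pre>
# Hedged sketch of a masked-image-modeling objective.
import torch

def mim_loss(tokens, encoder, decoder, mask_ratio=0.75):
    B, N, D = tokens.shape
    n_keep = int(N * (1 - mask_ratio))
    perm = torch.rand(B, N).argsort(dim=1)   # random permutation per sample
    keep = perm[:, :n_keep]                  # indices of visible tokens
    visible = torch.gather(tokens, 1, keep.unsqueeze(-1).expand(-1, -1, D))
    latent = encoder(visible)
    pred = decoder(latent, keep, N)          # hypothetical: predicts all N tokens
    masked = torch.ones(B, N, dtype=torch.bool)
    masked.scatter_(1, keep, False)          # loss only on hidden tokens
    return ((pred - tokens) ** 2)[masked].mean()
</pre>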
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.00192">arXiv:2406.00192</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.00192">pdf</a>, <a href="https://arxiv.org/format/2406.00192">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Direct Cardiac Segmentation from Undersampled K-space Using Transformers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Y">Yundi Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Stolt-Ans%C3%B3%2C+N">Nil Stolt-Ansó</a>, <a href="/search/eess?searchtype=author&amp;query=Pan%2C+J">Jiazhen Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+W">Wenqi Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2406.00192v1-abstract-full"> The prevailing deep learning-based methods of predicting cardiac segmentation involve reconstructed magnetic resonance (MR) images. The heavy dependency of segmentation approaches on image quality significantly limits the acceleration rate in fast MR reconstruction. Moreover, the practice of treating reconstruction and segmentation as separate sequential processes leads to artifact generation and information loss in the intermediate stage. These issues pose a great risk to achieving high-quality outcomes. To leverage the redundant k-space information overlooked in this dual-step pipeline, we introduce a novel approach to directly deriving segmentations from sparse k-space samples using a transformer (DiSK). DiSK operates by globally extracting latent features from 2D+time k-space data with attention blocks and subsequently predicting the segmentation label of query points. We evaluate our model under various acceleration factors (ranging from 4 to 64) and compare against two image-based segmentation baselines. Our model consistently outperforms the baselines in Dice and Hausdorff distances across foreground classes for all presented sampling rates. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li>
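<p class="is-size-7">To make the "attend from query points to k-space samples" idea concrete, here is a toy module in the same spirit: k-space samples become keys/values, spatial query coordinates become queries, and each query point is classified directly. All dimensions and embeddings are illustrative assumptions.</p>
<pre>
# Hedged sketch of direct segmentation from sparse k-space, not DiSK itself.
import torch
import torch.nn as nn

class KSpaceSegmenter(nn.Module):
    def __init__(self, d=128, n_classes=4):
        super().__init__()
        self.kspace_embed = nn.Linear(4, d)   # (re, im, kx, ky) per sample
        self.query_embed = nn.Linear(2, d)    # (x, y) image-space query
        self.attn = nn.MultiheadAttention(d, num_heads=4, batch_first=True)
        self.head = nn.Linear(d, n_classes)

    def forward(self, ksamples, query_xy):
        kv = self.kspace_embed(ksamples)      # (B, M, d)
        q = self.query_embed(query_xy)        # (B, P, d)
        out, _ = self.attn(q, kv, kv)         # queries attend to k-space
        return self.head(out)                 # (B, P, n_classes) logits
</pre>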
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.00192v1-abstract-full').style.display = 'none'; document.getElementById('2406.00192v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.00125">arXiv:2406.00125</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.00125">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> TotalVibeSegmentator: Full Body MRI Segmentation for the NAKO and UK Biobank </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Graf%2C+R">Robert Graf</a>, <a href="/search/eess?searchtype=author&amp;query=Platzek%2C+P">Paul-S枚ren Platzek</a>, <a href="/search/eess?searchtype=author&amp;query=Riedel%2C+E+O">Evamaria Olga Riedel</a>, <a href="/search/eess?searchtype=author&amp;query=Ramsch%C3%BCtz%2C+C">Constanze Ramsch眉tz</a>, <a href="/search/eess?searchtype=author&amp;query=Starck%2C+S">Sophie Starck</a>, <a href="/search/eess?searchtype=author&amp;query=M%C3%B6ller%2C+H+K">Hendrik Kristian M枚ller</a>, <a href="/search/eess?searchtype=author&amp;query=Atad%2C+M">Matan Atad</a>, <a href="/search/eess?searchtype=author&amp;query=V%C3%B6lzke%2C+H">Henry V枚lzke</a>, <a href="/search/eess?searchtype=author&amp;query=B%C3%BClow%2C+R">Robin B眉low</a>, <a href="/search/eess?searchtype=author&amp;query=Schmidt%2C+C+O">Carsten Oliver Schmidt</a>, <a href="/search/eess?searchtype=author&amp;query=R%C3%BCdebusch%2C+J">Julia R眉debusch</a>, <a href="/search/eess?searchtype=author&amp;query=Jung%2C+M">Matthias Jung</a>, <a href="/search/eess?searchtype=author&amp;query=Reisert%2C+M">Marco Reisert</a>, <a href="/search/eess?searchtype=author&amp;query=Weiss%2C+J">Jakob Weiss</a>, <a href="/search/eess?searchtype=author&amp;query=L%C3%B6ffler%2C+M">Maximilian L枚ffler</a>, <a href="/search/eess?searchtype=author&amp;query=Bamberg%2C+F">Fabian Bamberg</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Bene Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Kirschke%2C+J+S">Jan Stefan Kirschke</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.00125v3-abstract-short" style="display: inline;"> Objectives: To present a publicly available torso segmentation network for large epidemiology datasets on volumetric interpolated breath-hold examination (VIBE) images. 
Materials &amp; Methods: We extracted preliminary segmentations from TotalSegmentator, spine, and body composition networks for VIBE images, then improved them iteratively and retrained a nnUNet network. Using subsets of NAKO (85 subje&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.00125v3-abstract-full').style.display = 'inline'; document.getElementById('2406.00125v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.00125v3-abstract-full" style="display: none;"> Objectives: To present a publicly available torso segmentation network for large epidemiology datasets on volumetric interpolated breath-hold examination (VIBE) images. Materials &amp; Methods: We extracted preliminary segmentations from TotalSegmentator, spine, and body composition networks for VIBE images, then improved them iteratively and retrained a nnUNet network. Using subsets of NAKO (85 subjects) and UK Biobank (16 subjects), we evaluated with Dice-score on a holdout set (12 subjects) and existing organ segmentation approach (1000 subjects), generating 71 semantic segmentation types for VIBE images. We provide an additional network for the vertebra segments 22 individual vertebra types. Results: We achieved an average Dice score of 0.89 +- 0.07 overall 71 segmentation labels. We scored &gt; 0.90 Dice-score on the abdominal organs except for the pancreas with a Dice of 0.70. Conclusion: Our work offers a detailed and refined publicly available full torso segmentation on VIBE images. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.00125v3-abstract-full').style.display = 'none'; document.getElementById('2406.00125v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">https://github.com/robert-graf/TotalVibeSegmentator</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.09549">arXiv:2405.09549</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.09549">pdf</a>, <a href="https://arxiv.org/format/2405.09549">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Deep-learning-based clustering of OCT images for biomarker discovery in age-related macular degeneration (Pinnacle study report 4) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Holland%2C+R">Robbie Holland</a>, <a href="/search/eess?searchtype=author&amp;query=Kaye%2C+R">Rebecca Kaye</a>, <a href="/search/eess?searchtype=author&amp;query=Hagag%2C+A+M">Ahmed M. 
Hagag</a>, <a href="/search/eess?searchtype=author&amp;query=Leingang%2C+O">Oliver Leingang</a>, <a href="/search/eess?searchtype=author&amp;query=Taylor%2C+T+R+P">Thomas R. P. Taylor</a>, <a href="/search/eess?searchtype=author&amp;query=Bogunovi%C4%87%2C+H">Hrvoje Bogunovi膰</a>, <a href="/search/eess?searchtype=author&amp;query=Schmidt-Erfurth%2C+U">Ursula Schmidt-Erfurth</a>, <a href="/search/eess?searchtype=author&amp;query=Scholl%2C+H+P+N">Hendrik P. N. Scholl</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Lotery%2C+A+J">Andrew J. Lotery</a>, <a href="/search/eess?searchtype=author&amp;query=Sivaprasad%2C+S">Sobha Sivaprasad</a>, <a href="/search/eess?searchtype=author&amp;query=Menten%2C+M+J">Martin J. Menten</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.09549v1-abstract-short" style="display: inline;"> Diseases are currently managed by grading systems, where patients are stratified by grading systems into stages that indicate patient risk and guide clinical management. However, these broad categories typically lack prognostic value, and proposals for new biomarkers are currently limited to anecdotal observations. In this work, we introduce a deep-learning-based biomarker proposal system for the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.09549v1-abstract-full').style.display = 'inline'; document.getElementById('2405.09549v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.09549v1-abstract-full" style="display: none;"> Diseases are currently managed by grading systems, where patients are stratified by grading systems into stages that indicate patient risk and guide clinical management. However, these broad categories typically lack prognostic value, and proposals for new biomarkers are currently limited to anecdotal observations. In this work, we introduce a deep-learning-based biomarker proposal system for the purpose of accelerating biomarker discovery in age-related macular degeneration (AMD). It works by first training a neural network using self-supervised contrastive learning to discover, without any clinical annotations, features relating to both known and unknown AMD biomarkers present in 46,496 retinal optical coherence tomography (OCT) images. To interpret the discovered biomarkers, we partition the images into 30 subsets, termed clusters, that contain similar features. We then conduct two parallel 1.5-hour semi-structured interviews with two independent teams of retinal specialists that describe each cluster in clinical language. Overall, both teams independently identified clearly distinct characteristics in 27 of 30 clusters, of which 23 were related to AMD. Seven were recognised as known biomarkers already used in established grading systems and 16 depicted biomarker combinations or subtypes that are either not yet used in grading systems, were only recently proposed, or were unknown. Clusters separated incomplete from complete retinal atrophy, intraretinal from subretinal fluid and thick from thin choroids, and in simulation outperformed clinically-used grading systems in prognostic value. 
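<p class="is-size-7">The clustering step described above reduces to: embed every image with the contrastively trained encoder, then group the embeddings. A minimal sketch, where <code>encoder</code> is a placeholder for the self-supervised network and k-means stands in for whichever clustering the authors used:</p>
<pre>
# Hedged sketch of embedding + clustering for expert review.
import torch
from sklearn.cluster import KMeans

@torch.no_grad()
def cluster_embeddings(loader, encoder, n_clusters=30):
    # loader yields image batches; encoder maps (B, ...) -> (B, d) features
    feats = torch.cat([encoder(x) for x in loader]).cpu().numpy()
    return KMeans(n_clusters=n_clusters, n_init=10).fit_predict(feats)
</pre>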
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.08783">arXiv:2405.08783</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.08783">pdf</a>, <a href="https://arxiv.org/format/2405.08783">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> The Developing Human Connectome Project: A Fast Deep Learning-based Pipeline for Neonatal Cortical Surface Reconstruction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Ma%2C+Q">Qiang Ma</a>, <a href="/search/eess?searchtype=author&amp;query=Liang%2C+K">Kaili Liang</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+L">Liu Li</a>, <a href="/search/eess?searchtype=author&amp;query=Masui%2C+S">Saga Masui</a>, <a href="/search/eess?searchtype=author&amp;query=Guo%2C+Y">Yourong Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Nosarti%2C+C">Chiara Nosarti</a>, <a href="/search/eess?searchtype=author&amp;query=Robinson%2C+E+C">Emma C. Robinson</a>, <a href="/search/eess?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2405.08783v2-abstract-full"> The Developing Human Connectome Project (dHCP) aims to explore developmental patterns of the human brain during the perinatal period. An automated processing pipeline has been developed to extract high-quality cortical surfaces from structural brain magnetic resonance (MR) images for the dHCP neonatal dataset. However, the current implementation of the pipeline requires more than 6.5 hours to process a single MRI scan, making it expensive for large-scale neuroimaging studies. In this paper, we propose a fast deep learning (DL) based pipeline for dHCP neonatal cortical surface reconstruction, incorporating DL-based brain extraction, cortical surface reconstruction and spherical projection, as well as GPU-accelerated cortical surface inflation and cortical feature estimation. We introduce a multiscale deformation network to learn diffeomorphic cortical surface reconstruction end-to-end from T2-weighted brain MRI. A fast unsupervised spherical mapping approach is integrated to minimize metric distortions between cortical surfaces and projected spheres. The entire workflow of our DL-based dHCP pipeline completes within only 24 seconds on a modern GPU, which is nearly 1000 times faster than the original dHCP pipeline. The qualitative assessment demonstrates that for 82.5% of the test samples, the cortical surfaces reconstructed by our DL-based pipeline achieve superior (54.2%) or equal (28.3%) surface quality compared to the original dHCP pipeline. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by Medical Image Analysis</span> </p> </li>
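<p class="is-size-7">The "minimize metric distortions" objective mentioned above can be illustrated with an edge-length distortion loss between a cortical mesh and its spherical projection. The formulation below is a generic sketch, not the paper's exact loss.</p>
<pre>
# Hedged sketch: edge-length (metric) distortion between mesh and sphere.
import torch

def metric_distortion(verts_cortex, verts_sphere, edges):
    # verts_*: (V, 3) vertex positions; edges: (E, 2) long tensor of indices
    d_c = (verts_cortex[edges[:, 0]] - verts_cortex[edges[:, 1]]).norm(dim=1)
    d_s = (verts_sphere[edges[:, 0]] - verts_sphere[edges[:, 1]]).norm(dim=1)
    # penalize edges whose length changes under the spherical mapping
    return ((d_s - d_c) ** 2).mean()
</pre>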
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.06463">arXiv:2405.06463</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.06463">pdf</a>, <a href="https://arxiv.org/format/2405.06463">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MRSegmentator: Multi-Modality Segmentation of 40 Classes in MRI and CT </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=H%C3%A4ntze%2C+H">Hartmut Häntze</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+L">Lina Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Mertens%2C+C+J">Christian J. Mertens</a>, <a href="/search/eess?searchtype=author&amp;query=Dorfner%2C+F+J">Felix J. Dorfner</a>, <a href="/search/eess?searchtype=author&amp;query=Donle%2C+L">Leonhard Donle</a>, <a href="/search/eess?searchtype=author&amp;query=Busch%2C+F">Felix Busch</a>, <a href="/search/eess?searchtype=author&amp;query=Kader%2C+A">Avan Kader</a>, <a href="/search/eess?searchtype=author&amp;query=Ziegelmayer%2C+S">Sebastian Ziegelmayer</a>, <a href="/search/eess?searchtype=author&amp;query=Bayerl%2C+N">Nadine Bayerl</a>, <a href="/search/eess?searchtype=author&amp;query=Navab%2C+N">Nassir Navab</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Schnabel%2C+J">Julia Schnabel</a>, <a href="/search/eess?searchtype=author&amp;query=Aerts%2C+H+J">Hugo JWL Aerts</a>, <a href="/search/eess?searchtype=author&amp;query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/eess?searchtype=author&amp;query=Bamberg%2C+F">Fabian Bamberg</a>, <a href="/search/eess?searchtype=author&amp;query=Wei%C3%9F%2C+J">Jakob Weiß</a>, <a href="/search/eess?searchtype=author&amp;query=Schlett%2C+C+L">Christopher L. Schlett</a>, <a href="/search/eess?searchtype=author&amp;query=Ringhof%2C+S">Steffen Ringhof</a>, <a href="/search/eess?searchtype=author&amp;query=Niendorf%2C+T">Thoralf Niendorf</a>, <a href="/search/eess?searchtype=author&amp;query=Pischon%2C+T">Tobias Pischon</a>, <a href="/search/eess?searchtype=author&amp;query=Kauczor%2C+H">Hans-Ulrich Kauczor</a>, <a href="/search/eess?searchtype=author&amp;query=Nonnenmacher%2C+T">Tobias Nonnenmacher</a>, <a href="/search/eess?searchtype=author&amp;query=Kr%C3%B6ncke%2C+T">Thomas Kröncke</a>, <a href="/search/eess?searchtype=author&amp;query=V%C3%B6lzke%2C+H">Henry Völzke</a>, <a href="/search/eess?searchtype=author&amp;query=Schulz-Menger%2C+J">Jeanette Schulz-Menger</a> , et al. (7 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2405.06463v3-abstract-full"> Purpose: To develop and evaluate a deep learning model for multi-organ segmentation of MRI scans. Materials and Methods: The model was trained on 1,200 manually annotated 3D axial MRI scans from the UK Biobank, 221 in-house MRI scans, and 1,228 CT scans from the TotalSegmentator dataset. A human-in-the-loop annotation workflow was employed, leveraging cross-modality transfer learning from an existing CT segmentation model to segment 40 anatomical structures. The annotation process began with a model based on transfer learning between CT and MR, which was iteratively refined based on manual corrections to predicted segmentations. The model&#39;s performance was evaluated on MRI examinations obtained from the German National Cohort (NAKO) study (n=900), from the AMOS22 dataset (n=60), and from the TotalSegmentator-MRI test data (n=29). The Dice Similarity Coefficient (DSC) and Hausdorff Distance (HD) were used to assess segmentation quality, stratified by organ and scan type. The model and its weights will be open-sourced. Results: MRSegmentator demonstrated high accuracy for well-defined organs (lungs: DSC 0.96, heart: DSC 0.94) and organs with anatomic variability (liver: DSC 0.96, kidneys: DSC 0.95). Smaller structures showed lower accuracy (portal/splenic veins: DSC 0.64, adrenal glands: DSC 0.69). On external validation using NAKO data, mean DSC ranged from 0.85 $\pm$ 0.08 for T2-HASTE to 0.91 $\pm$ 0.05 for in-phase sequences. The model generalized well to CT, achieving a mean DSC of 0.84 $\pm$ 0.11 on AMOS CT data. Conclusion: MRSegmentator accurately segments 40 anatomical structures in MRI across diverse datasets and imaging protocols, with additional generalizability to CT images. This open-source model provides a valuable tool for automated multi-organ segmentation in medical imaging research. It can be downloaded from https://github.com/hhaentze/MRSegmentator. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 6 figures; updated data; completed co-author info</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> J.3 </p> </li>
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 6 figures; updated data; completed co-author info</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> J.3 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.17621">arXiv:2404.17621</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.17621">pdf</a>, <a href="https://arxiv.org/format/2404.17621">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TMI.2024.3385024">10.1109/TMI.2024.3385024 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Attention-aware non-rigid image registration for accelerated MR imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Ghoul%2C+A">Aya Ghoul</a>, <a href="/search/eess?searchtype=author&amp;query=Pan%2C+J">Jiazhen Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Lingg%2C+A">Andreas Lingg</a>, <a href="/search/eess?searchtype=author&amp;query=K%C3%BCbler%2C+J">Jens K眉bler</a>, <a href="/search/eess?searchtype=author&amp;query=Krumm%2C+P">Patrick Krumm</a>, <a href="/search/eess?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Gatidis%2C+S">Sergios Gatidis</a>, <a href="/search/eess?searchtype=author&amp;query=K%C3%BCstner%2C+T">Thomas K眉stner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.17621v1-abstract-short" style="display: inline;"> Accurate motion estimation at high acceleration factors enables rapid motion-compensated reconstruction in Magnetic Resonance Imaging (MRI) without compromising the diagnostic image quality. In this work, we introduce an attention-aware deep learning-based framework that can perform non-rigid pairwise registration for fully sampled and accelerated MRI. We extract local visual representations to bu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.17621v1-abstract-full').style.display = 'inline'; document.getElementById('2404.17621v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.17621v1-abstract-full" style="display: none;"> Accurate motion estimation at high acceleration factors enables rapid motion-compensated reconstruction in Magnetic Resonance Imaging (MRI) without compromising the diagnostic image quality. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.08350">arXiv:2404.08350</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.08350">pdf</a>, <a href="https://arxiv.org/format/2404.08350">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Self-Supervised k-Space Regularization for Motion-Resolved Abdominal MRI Using Neural Implicit k-Space Representation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Spieker%2C+V">Veronika Spieker</a>, <a href="/search/eess?searchtype=author&amp;query=Eichhorn%2C+H">Hannah Eichhorn</a>, <a href="/search/eess?searchtype=author&amp;query=Stelter%2C+J+K">Jonathan K. Stelter</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+W">Wenqi Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Braren%2C+R+F">Rickmer F. Braren</a>, <a href="/search/eess?searchtype=author&amp;query=R%C3%BCckert%2C+D">Daniel Rückert</a>, <a href="/search/eess?searchtype=author&amp;query=Costabal%2C+F+S">Francisco Sahli Costabal</a>, <a href="/search/eess?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a href="/search/eess?searchtype=author&amp;query=Prieto%2C+C">Claudia Prieto</a>, <a href="/search/eess?searchtype=author&amp;query=Karampinos%2C+D+C">Dimitrios C. Karampinos</a>, <a href="/search/eess?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2404.08350v1-abstract-full"> Neural implicit k-space representations have shown promising results for dynamic MRI at high temporal resolutions. Yet, their exclusive training in k-space limits the application of common image regularization methods to improve the final reconstruction. In this work, we introduce the concept of parallel imaging-inspired self-consistency (PISCO), which we incorporate as a novel self-supervised k-space regularization enforcing a consistent neighborhood relationship. At no additional data cost, the proposed regularization significantly improves neural implicit k-space reconstructions on simulated data. Abdominal in-vivo reconstructions using PISCO result in enhanced spatio-temporal image quality compared to state-of-the-art methods. Code is available at https://github.com/vjspi/PISCO-NIK. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under Review</span> </p> </li>
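<p class="is-size-7">The "consistent neighborhood relationship" that PISCO enforces is in the spirit of parallel-imaging kernels: a k-space sample should be predictable as a fixed linear combination of its neighbors, with the same weights everywhere. A much-simplified toy version of such a consistency penalty:</p>
<pre>
# Hedged toy version of a parallel-imaging-style self-consistency penalty;
# simplified relative to PISCO.
import torch

def self_consistency(neighbors, targets):
    # neighbors: (N, K) neighbor samples per location; targets: (N, 1)
    # center samples (complex data handled as stacked real/imag rows here).
    # Shared least-squares weights across all locations:
    w = torch.linalg.lstsq(neighbors, targets).solution
    # residual of predicting each center from its neighborhood
    return ((neighbors @ w - targets) ** 2).mean()
</pre>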
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under Review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.00767">arXiv:2404.00767</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.00767">pdf</a>, <a href="https://arxiv.org/format/2404.00767">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Intensity-based 3D motion correction for cardiac MR images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Stolt-Ans%C3%B3%2C+N">Nil Stolt-Ans贸</a>, <a href="/search/eess?searchtype=author&amp;query=Sideri-Lampretsa%2C+V">Vasiliki Sideri-Lampretsa</a>, <a href="/search/eess?searchtype=author&amp;query=Dannecker%2C+M">Maik Dannecker</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.00767v1-abstract-short" style="display: inline;"> Cardiac magnetic resonance (CMR) image acquisition requires subjects to hold their breath while 2D cine images are acquired. This process assumes that the heart remains in the same position across all slices. However, differences in breathhold positions or patient motion introduce 3D slice misalignments. In this work, we propose an algorithm that simultaneously aligns all SA and LA slices by maxim&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.00767v1-abstract-full').style.display = 'inline'; document.getElementById('2404.00767v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.00767v1-abstract-full" style="display: none;"> Cardiac magnetic resonance (CMR) image acquisition requires subjects to hold their breath while 2D cine images are acquired. This process assumes that the heart remains in the same position across all slices. However, differences in breathhold positions or patient motion introduce 3D slice misalignments. In this work, we propose an algorithm that simultaneously aligns all SA and LA slices by maximizing the pair-wise intensity agreement between their intersections. Unlike previous works, our approach is formulated as a subject-specific optimization problem and requires no prior knowledge of the underlying anatomy. We quantitatively demonstrate that the proposed method is robust against a large range of rotations and translations by synthetically misaligning 10 motion-free datasets and aligning them back using the proposed method. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.00767v1-abstract-full').style.display = 'none'; document.getElementById('2404.00767v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.16776">arXiv:2403.16776</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.16776">pdf</a>, <a href="https://arxiv.org/format/2403.16776">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Diff-Def: Diffusion-Generated Deformation Fields for Conditional Atlases </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Starck%2C+S">Sophie Starck</a>, <a href="/search/eess?searchtype=author&amp;query=Sideri-Lampretsa%2C+V">Vasiliki Sideri-Lampretsa</a>, <a href="/search/eess?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/eess?searchtype=author&amp;query=Menten%2C+M">Martin Menten</a>, <a href="/search/eess?searchtype=author&amp;query=Mueller%2C+T">Tamara Mueller</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.16776v1-abstract-short" style="display: inline;"> Anatomical atlases are widely used for population analysis. Conditional atlases target a particular sub-population defined via certain conditions (e.g. demographics or pathologies) and allow for the investigation of fine-grained anatomical differences - such as morphological changes correlated with age. Existing approaches use either registration-based methods that are unable to handle large anato&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.16776v1-abstract-full').style.display = 'inline'; document.getElementById('2403.16776v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.16776v1-abstract-full" style="display: none;"> Anatomical atlases are widely used for population analysis. Conditional atlases target a particular sub-population defined via certain conditions (e.g. demographics or pathologies) and allow for the investigation of fine-grained anatomical differences - such as morphological changes correlated with age. Existing approaches use either registration-based methods that are unable to handle large anatomical variations or generative models, which can suffer from training instabilities and hallucinations. To overcome these limitations, we use latent diffusion models to generate deformation fields, which transform a general population atlas into one representing a specific sub-population. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.11001">arXiv:2403.11001</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.11001">pdf</a>, <a href="https://arxiv.org/format/2403.11001">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-031-72111-3_68">10.1007/978-3-031-72111-3_68 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Topologically Faithful Multi-class Segmentation in Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Berger%2C+A+H">Alexander H. Berger</a>, <a href="/search/eess?searchtype=author&amp;query=Stucki%2C+N">Nico Stucki</a>, <a href="/search/eess?searchtype=author&amp;query=Lux%2C+L">Laurin Lux</a>, <a href="/search/eess?searchtype=author&amp;query=Buergin%2C+V">Vincent Buergin</a>, <a href="/search/eess?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a href="/search/eess?searchtype=author&amp;query=Banaszak%2C+A">Anna Banaszak</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Bauer%2C+U">Ulrich Bauer</a>, <a href="/search/eess?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2403.11001v2-abstract-full"> Topological accuracy in medical image segmentation is a highly important property for downstream applications such as network analysis and flow modeling in vessels or cell counting. Recently, significant methodological advancements have brought well-founded concepts from algebraic topology to binary segmentation. However, these approaches have been underexplored in multi-class segmentation scenarios, where topological errors are common. We propose a general loss function for topologically faithful multi-class segmentation, extending the recent Betti matching concept, which is based on induced matchings of persistence barcodes. We project the N-class segmentation problem onto N single-class segmentation tasks, which allows us to use 1-parameter persistent homology, making the training of neural networks computationally feasible. We validate our method on a comprehensive set of four medical datasets with highly variant topological characteristics. Our loss formulation significantly enhances topological correctness in cardiac, cell, artery-vein, and Circle of Willis segmentation. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> MICCAI 2024, Lecture Notes in Computer Science, vol. 15008, pp. 721-731, 2024 </p> </li>
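<p class="is-size-7">The projection step described above is mechanical: split the N-class prediction into N single-class problems and sum a per-class topological loss. In the sketch below, <code>betti_matching_loss</code> is a stand-in for the per-class Betti matching term, which is not reimplemented here.</p>
<pre>
# Hedged sketch of the N-class -> N single-class projection.
import torch

def multiclass_topo_loss(probs, target, betti_matching_loss):
    # probs: (B, N, H, W) softmax output; target: (B, H, W) integer labels
    loss = 0.0
    for c in range(probs.shape[1]):
        # each class becomes a binary problem for 1-parameter persistence
        loss = loss + betti_matching_loss(probs[:, c], (target == c).float())
    return loss / probs.shape[1]
</pre>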
However, these approaches have been underexplored in multi-class segmentation scenari&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.11001v2-abstract-full').style.display = 'inline'; document.getElementById('2403.11001v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.11001v2-abstract-full" style="display: none;"> Topological accuracy in medical image segmentation is a highly important property for downstream applications such as network analysis and flow modeling in vessels or cell counting. Recently, significant methodological advancements have brought well-founded concepts from algebraic topology to binary segmentation. However, these approaches have been underexplored in multi-class segmentation scenarios, where topological errors are common. We propose a general loss function for topologically faithful multi-class segmentation extending the recent Betti matching concept, which is based on induced matchings of persistence barcodes. We project the N-class segmentation problem to N single-class segmentation tasks, which allows us to use 1-parameter persistent homology, making training of neural networks computationally feasible. We validate our method on a comprehensive set of four medical datasets with highly variant topological characteristics. Our loss formulation significantly enhances topological correctness in cardiac, cell, artery-vein, and Circle of Willis segmentation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.11001v2-abstract-full').style.display = 'none'; document.getElementById('2403.11001v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> MICCAI 2024, Lecture Notes in Computer Science, vol. 15008, pp. 721-731, 2024 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.08464">arXiv:2403.08464</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.08464">pdf</a>, <a href="https://arxiv.org/format/2403.08464">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Diffusion Models with Implicit Guidance for Medical Anomaly Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Bercea%2C+C+I">Cosmin I. Bercea</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. 
arXiv:2403.08464 (https://arxiv.org/abs/2403.08464) [pdf, other] eess.IV cs.CV cs.LG
Title: Diffusion Models with Implicit Guidance for Medical Anomaly Detection
Authors: Cosmin I. Bercea, Benedikt Wiestler, Daniel Rueckert, Julia A. Schnabel
Abstract: Diffusion models have advanced unsupervised anomaly detection by improving the transformation of pathological images into pseudo-healthy equivalents. Nonetheless, standard approaches may compromise critical information during pathology removal, leading to restorations that do not align with unaffected regions in the original scans. Such discrepancies can inadvertently increase false positive rates and reduce specificity, complicating radiological evaluations. This paper introduces Temporal Harmonization for Optimal Restoration (THOR), which refines the de-noising process by integrating implicit guidance through temporal anomaly maps. THOR aims to preserve the integrity of healthy tissue in areas unaffected by pathology. Comparative evaluations show that THOR surpasses existing diffusion-based methods in detecting and segmenting anomalies in brain MRIs and wrist X-rays. Code: https://github.com/ci-ber/THOR_DDPM
Submitted 13 March, 2024; originally announced March 2024.
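A loose sketch of the guidance pattern the abstract describes: during a DDIM-style reverse pass, regions an anomaly map marks as healthy are re-imposed from the input, so only suspect tissue is re-synthesised. Here `model` (a noise predictor) and `alphas_cumprod` (a 1-D tensor of cumulative noise schedules) are assumed inputs; this is not the authors' THOR implementation.

    import torch

    @torch.no_grad()
    def guided_restore(model, x0, anomaly_map, alphas_cumprod):
        """x0: pathological image; anomaly_map in [0, 1], 1 = suspect region."""
        x = torch.randn_like(x0)
        for t in reversed(range(len(alphas_cumprod))):
            a_t = alphas_cumprod[t]
            eps = model(x, t)                                    # predicted noise
            x0_hat = (x - (1 - a_t).sqrt() * eps) / a_t.sqrt()   # clean estimate
            # implicit guidance: keep tissue scored as healthy from the input
            x0_hat = anomaly_map * x0_hat + (1 - anomaly_map) * x0
            a_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
            x = a_prev.sqrt() * x0_hat + (1 - a_prev).sqrt() * eps   # DDIM step
        return x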
arXiv:2402.16368 (https://arxiv.org/abs/2402.16368) [pdf, other] eess.IV cs.CV
DOI: 10.1007/s00330-024-11155-y
Title: SPINEPS -- Automatic Whole Spine Segmentation of T2-weighted MR images using a Two-Phase Approach to Multi-class Semantic and Instance Segmentation
Authors: Hendrik Möller, Robert Graf, Joachim Schmitt, Benjamin Keinert, Matan Atad, Anjany Sekuboyina, Felix Streckenbach, Hanna Schön, Florian Kofler, Thomas Kroencke, Stefanie Bette, Stefan Willich, Thomas Keil, Thoralf Niendorf, Tobias Pischon, Beate Endemann, Bjoern Menze, Daniel Rueckert, Jan S. Kirschke
Abstract: Purpose. To present SPINEPS, an open-source deep learning approach for semantic and instance segmentation of 14 spinal structures (ten vertebra substructures, intervertebral discs, spinal cord, spinal canal, and sacrum) in whole-body T2w MRI. Methods. During this HIPAA-compliant, retrospective study, we utilized the public SPIDER dataset (218 subjects, 63% female) and a subset of the German National Cohort (1423 subjects, mean age 53, 49% female) for training and evaluation. We combined CT and T2w segmentations to train models that segment 14 spinal structures in T2w sagittal scans both semantically and instance-wise. Performance evaluation metrics included the Dice similarity coefficient, average symmetrical surface distance, panoptic quality, segmentation quality, and recognition quality. Statistical significance was assessed using the Wilcoxon signed-rank test. An in-house dataset was used to qualitatively evaluate out-of-distribution samples. Results. On the public dataset, our approach outperformed the baseline (instance-wise vertebra Dice score 0.929 vs. 0.907, p-value < 0.001). Training on auto-generated annotations and evaluating on manually corrected test data from the GNC yielded global Dice scores of 0.900 for vertebrae, 0.960 for intervertebral discs, and 0.947 for the spinal canal. Incorporating the SPIDER dataset during training increased these scores to 0.920, 0.967, and 0.958, respectively. Conclusions. The proposed segmentation approach offers robust segmentation of 14 spinal structures in T2w sagittal images, including the spinal cord, spinal canal, intervertebral discs, endplate, sacrum, and vertebrae. The approach yields both a semantic and an instance mask as output, thus being easy to utilize. This marks the first publicly available algorithm for whole-spine segmentation in sagittal T2w MR imaging.
Submitted 22 April, 2024; v1 submitted 26 February, 2024; originally announced February 2024.
Comments: https://github.com/Hendrik-code/spineps
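For reference, the headline metric above reduces to a simple overlap ratio; a minimal NumPy version for binary masks (an illustrative helper, not the study's evaluation code):

    import numpy as np

    def dice(a, b):
        """Dice similarity coefficient between two binary masks."""
        a, b = a.astype(bool), b.astype(bool)
        denom = a.sum() + b.sum()
        return 2.0 * np.logical_and(a, b).sum() / denom if denom else 1.0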
arXiv:2401.16564 (https://arxiv.org/abs/2401.16564) [pdf] eess.SP
DOI: 10.1109/RBME.2024.3485022
Title: Data and Physics driven Deep Learning Models for Fast MRI Reconstruction: Fundamentals and Methodologies
Authors: Jiahao Huang, Yinzhe Wu, Fanwen Wang, Yingying Fang, Yang Nan, Cagan Alkan, Daniel Abraham, Congyu Liao, Lei Xu, Zhifan Gao, Weiwen Wu, Lei Zhu, Zhaolin Chen, Peter Lally, Neal Bangerter, Kawin Setsompop, Yike Guo, Daniel Rueckert, Ge Wang, Guang Yang
Abstract: Magnetic Resonance Imaging (MRI) is a pivotal clinical diagnostic tool, yet its extended scanning times often compromise patient comfort and image quality, especially in volumetric, temporal and quantitative scans. This review elucidates recent advances in MRI acceleration via data and physics-driven models, leveraging techniques from algorithm unrolling models, enhancement-based methods, and plug-and-play models to the emerging full spectrum of generative model-based methods. We also explore the synergistic integration of data models with physics-based insights, encompassing the advancements in multi-coil hardware accelerations like parallel imaging and simultaneous multi-slice imaging, and the optimization of sampling patterns. We then focus on domain-specific challenges and opportunities, including image redundancy exploitation, image integrity, evaluation metrics, data heterogeneity, and model generalization. This work also discusses potential solutions and future research directions, with an emphasis on the role of data harmonization and federated learning for further improving the general applicability and performance of these methods in MRI reconstruction.
Submitted 21 October, 2024; v1 submitted 29 January, 2024; originally announced January 2024.
Comments: Accepted by IEEE Reviews in Biomedical Engineering (RBME)
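A compact sketch of the algorithm-unrolling family this review covers: each stage alternates a gradient step on the k-space data-consistency term with a learned denoiser. The single-coil FFT forward model and the `denoisers` list are simplifying assumptions.

    import torch

    def unrolled_recon(y, mask, denoisers, step=1.0):
        """y: undersampled k-space (H, W), complex; mask: (H, W) binary;
        denoisers: CNNs taking and returning (1, 2, H, W) real tensors."""
        x = torch.fft.ifft2(y, norm="ortho")                   # zero-filled start
        for net in denoisers:                                  # one stage per iteration
            r = mask * (torch.fft.fft2(x, norm="ortho") - y)   # data-consistency residual
            x = x - step * torch.fft.ifft2(r, norm="ortho")    # gradient of ||MFx - y||^2
            z = torch.stack([x.real, x.imag]).unsqueeze(0)     # complex -> 2 channels
            z = net(z).squeeze(0)                              # learned regulariser
            x = torch.complex(z[0], z[1])
        return x.abs()                                         # magnitude image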
arXiv:2401.10637 (https://arxiv.org/abs/2401.10637) [pdf, other] eess.IV cs.CV cs.LG
Title: Towards Universal Unsupervised Anomaly Detection in Medical Imaging
Authors: Cosmin I. Bercea, Benedikt Wiestler, Daniel Rueckert, Julia A. Schnabel
Abstract: The increasing complexity of medical imaging data underscores the need for advanced anomaly detection methods to automatically identify diverse pathologies. Current methods face challenges in capturing the broad spectrum of anomalies, often limiting their use to specific lesion types in brain scans. To address this challenge, we introduce a novel unsupervised approach, termed Reversed Auto-Encoders (RA), designed to create realistic pseudo-healthy reconstructions that enable the detection of a wider range of pathologies. We evaluate the proposed method across various imaging modalities, including magnetic resonance imaging (MRI) of the brain, pediatric wrist X-ray, and chest X-ray, and demonstrate superior performance in detecting anomalies compared to existing state-of-the-art methods. Our unsupervised anomaly detection approach may enhance diagnostic accuracy in medical imaging by identifying a broader range of unknown pathologies. Our code is publicly available at: https://github.com/ci-ber/RA
Submitted 19 January, 2024; originally announced January 2024.
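The evaluation pattern shared by this family of methods is compact enough to sketch: reconstruct a pseudo-healthy image and score anomalies by the residual. Any trained reconstruction `model` is assumed; this illustrates the general pattern, not the Reversed Auto-Encoder architecture itself.

    import torch

    def anomaly_map(model, x):
        with torch.no_grad():
            x_hat = model(x)             # pseudo-healthy reconstruction
        return (x - x_hat).abs()         # high residual = suspected pathology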
arXiv:2312.02608 (https://arxiv.org/abs/2312.02608) [pdf, other] cs.CV cs.AI cs.LG eess.IV
Title: Panoptica -- instance-wise evaluation of 3D semantic and instance segmentation maps
Authors: Florian Kofler, Hendrik Möller, Josef A. Buchner, Ezequiel de la Rosa, Ivan Ezhov, Marcel Rosier, Isra Mekki, Suprosanna Shit, Moritz Negwer, Rami Al-Maskari, Ali Ertürk, Shankeeth Vinayahalingam, Fabian Isensee, Sarthak Pati, Daniel Rueckert, Jan S. Kirschke, Stefan K. Ehrlich, Annika Reinke, Bjoern Menze, Benedikt Wiestler, Marie Piraud
Abstract: This paper introduces panoptica, a versatile and performance-optimized package designed for computing instance-wise segmentation quality metrics from 2D and 3D segmentation maps. panoptica addresses the limitations of existing metrics and provides a modular framework that complements the original intersection-over-union-based panoptic quality with other metrics, such as the distance metric Average Symmetric Surface Distance. The package is open-source, implemented in Python, and accompanied by comprehensive documentation and tutorials. panoptica employs a three-step metrics computation process to cover diverse use cases. The efficacy of panoptica is demonstrated on various real-world biomedical datasets, where an instance-wise evaluation is instrumental for an accurate representation of the underlying clinical task. Overall, we envision panoptica as a valuable tool facilitating in-depth evaluation of segmentation methods.
Submitted 5 December, 2023; originally announced December 2023.
Comments: 15 pages, 6 figures, 3 tables
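The intersection-over-union-based panoptic quality that panoptica builds on has a short closed form, PQ = SQ x RQ; a worked sketch with hypothetical matches:

    def panoptic_quality(ious, n_pred, n_ref):
        """ious: IoU of each matched pair (IoU > 0.5 counts as a true positive);
        n_pred / n_ref: predicted and reference instance counts."""
        tp = len(ious)
        fp, fn = n_pred - tp, n_ref - tp
        sq = sum(ious) / tp if tp else 0.0                    # segmentation quality
        rq = tp / (tp + 0.5 * fp + 0.5 * fn) if (tp + fp + fn) else 0.0
        return sq * rq                                        # panoptic quality

    # e.g. two matches with IoUs 0.9 and 0.7, one spurious and one missed
    # instance: SQ = 0.8, RQ = 2/3, PQ is roughly 0.53
    print(panoptic_quality([0.9, 0.7], n_pred=3, n_ref=3))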
arXiv:2310.10756 (https://arxiv.org/abs/2310.10756) [pdf] eess.IV cs.CV cs.LG
Title: Deep Conditional Shape Models for 3D cardiac image segmentation
Authors: Athira J Jacob, Puneet Sharma, Daniel Ruckert
Abstract: Delineation of anatomical structures is often the first step of many medical image analysis workflows. While convolutional neural networks achieve high performance, these do not incorporate anatomical shape information. We introduce a novel segmentation algorithm that uses Deep Conditional Shape models (DCSMs) as a core component. Using deep implicit shape representations, the algorithm learns a modality-agnostic shape model that can generate the signed distance functions for any anatomy of interest. To fit the generated shape to the image, the shape model is conditioned on anatomic landmarks that can be automatically detected or provided by the user. Finally, we add a modality-dependent, lightweight refinement network to capture any fine details not represented by the implicit function. The proposed DCSM framework is evaluated on the problem of cardiac left ventricle (LV) segmentation from multiple 3D modalities (contrast-enhanced CT, non-contrasted CT, 3D echocardiography-3DE). We demonstrate that the automatic DCSM outperforms the baseline for non-contrasted CT without the local refinement, and with the refinement for contrasted CT and 3DE, especially with significant improvement in the Hausdorff distance. The semi-automatic DCSM with user-input landmarks, while only trained on contrasted CT, achieves greater than 92% Dice for all modalities. Both automatic DCSM with refinement and semi-automatic DCSM achieve equivalent or better performance compared to inter-user variability for these modalities.
Submitted 16 October, 2023; originally announced October 2023.
Comments: Accepted and presented as oral presentation at Statistical Atlases and Computational Modeling of the Heart (STACOM) workshop at MICCAI 2023
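The core DCSM ingredient, an implicit shape network conditioned on landmarks, can be sketched as an MLP from a query coordinate plus a landmark vector to a signed distance. Layer sizes and the flat landmark encoding are illustrative guesses, not the paper's architecture.

    import torch
    import torch.nn as nn

    class ConditionalSDF(nn.Module):
        """Maps (query point, flattened landmark coordinates) -> signed distance."""
        def __init__(self, n_landmarks, hidden=256):
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(3 + 3 * n_landmarks, hidden), nn.ReLU(),
                nn.Linear(hidden, hidden), nn.ReLU(),
                nn.Linear(hidden, 1),    # zero level set = the anatomy's surface
            )

        def forward(self, xyz, landmarks):
            # xyz: (B, 3); landmarks: (B, 3 * n_landmarks)
            return self.net(torch.cat([xyz, landmarks], dim=-1))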
arXiv:2310.02829 (https://arxiv.org/abs/2310.02829) [pdf, other] eess.IV cs.CV
Title: All Sizes Matter: Improving Volumetric Brain Segmentation on Small Lesions
Authors: Ayhan Can Erdur, Daniel Scholz, Josef A. Buchner, Stephanie E. Combs, Daniel Rueckert, Jan C. Peeken
Abstract: Brain metastases (BMs) are the most frequently occurring brain tumors. The treatment of patients having multiple BMs with stereotactic radiosurgery necessitates accurate localization of the metastases. Neural networks can assist in this time-consuming and costly task that is typically performed by human experts. Particularly challenging is the detection of small lesions since they are often underrepresented in existing approaches. Yet, lesion detection is equally important for all sizes. In this work, we develop an ensemble of neural networks explicitly focused on detecting and segmenting small BMs. To accomplish this task, we trained several neural networks focusing on individual aspects of the BM segmentation problem: We use blob loss, which specifically addresses the imbalance of lesion instances in terms of size and texture and is, therefore, not biased towards larger lesions.
In addition, a model using a subtraction sequence between the T1 and T1 contrast-enhanced sequences focuses on low-contrast lesions. Furthermore, we train additional models only on small lesions. Our experiments demonstrate the utility of the additional blob loss and the subtraction sequence. However, including the specialized small-lesion models in the ensemble deteriorates segmentation results. We also find domain-knowledge-inspired postprocessing steps to drastically increase our performance in most experiments. Our approach enables us to submit a competitive challenge entry to the ASNR-MICCAI BraTS Brain Metastasis Challenge 2023.
Submitted 4 October, 2023; originally announced October 2023.
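The instance-balancing idea behind blob loss can be sketched in a few lines: score each connected component separately, masking out the other lesions, then average, so small lesions count as much as large ones. `base_loss` is any pixel-wise loss; this follows the published blob-loss recipe only loosely.

    import torch
    from scipy import ndimage

    def blob_loss(pred, target, base_loss):
        """pred: (H, W) probabilities; target: (H, W) binary lesion mask."""
        labels, n = ndimage.label(target.numpy())    # connected components
        if n == 0:
            return base_loss(pred, target)
        total = 0.0
        for i in range(1, n + 1):
            blob = torch.from_numpy(labels == i).float()
            # mask out all *other* lesions so each blob is scored on its own
            keep = 1 - torch.from_numpy((labels > 0) & (labels != i)).float()
            total = total + base_loss(pred * keep, blob)
        return total / n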
arXiv:2309.16853 (https://arxiv.org/abs/2309.16853) [pdf, other] eess.SP
Title: T1/T2 relaxation temporal modelling from accelerated acquisitions using a Latent Transformer
Authors: Fanwen Wang, Michael Tanzer, Mengyun Qiao, Wenjia Bai, Daniel Rueckert, Guang Yang, Sonia Nielles-Vallespin
Abstract: Quantitative cardiac magnetic resonance T1 and T2 mapping enable myocardial tissue characterisation, but the lengthy scan times restrict their widespread clinical application. We propose a deep learning method that incorporates a time-dependency Latent Transformer module to model relationships between parameterised time frames for improved reconstruction from undersampled data. The module, implemented as a multi-resolution sequence-to-sequence transformer, is integrated into an encoder-decoder architecture to leverage the inherent temporal correlations in relaxation processes. The presented results for accelerated T1 and T2 mapping show that the model recovers maps with higher fidelity by explicit incorporation of time dynamics. This work demonstrates the importance of temporal modelling for artifact-free reconstruction in quantitative MRI.
Submitted 28 September, 2023; originally announced September 2023.
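At its core, the temporal module described above is a sequence-to-sequence transformer over per-frame latent vectors; a minimal single-resolution PyTorch sketch with illustrative dimensions:

    import torch
    import torch.nn as nn

    frames, dim = 9, 128                      # e.g. T1-mapping time points
    latents = torch.randn(1, frames, dim)     # (batch, time, latent)

    layer = nn.TransformerEncoderLayer(d_model=dim, nhead=8, batch_first=True)
    temporal = nn.TransformerEncoder(layer, num_layers=2)
    mixed = temporal(latents)                 # same shape, temporally mixed
    print(mixed.shape)                        # torch.Size([1, 9, 128])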
arXiv:2309.16735 (https://arxiv.org/abs/2309.16735) [pdf, other] physics.bio-ph eess.IV q-bio.QM
Title: Learnable real-time inference of molecular composition from diffuse spectroscopy of brain tissue
Authors: Ivan Ezhov, Kevin Scibilia, Luca Giannoni, Florian Kofler, Ivan Iliash, Felix Hsieh, Suprosanna Shit, Charly Caredda, Fred Lange, Ilias Tachtsidis, Daniel Rueckert
Abstract: Diffuse optical modalities such as broadband near-infrared spectroscopy (bNIRS) and hyperspectral imaging (HSI) represent a promising alternative for low-cost, non-invasive, and fast monitoring of functional and structural properties of living tissue. In particular, the possibility of extracting the molecular composition of the tissue from the optical spectra in real time makes these spectroscopy techniques a unique diagnostic tool. However, no established method exists to streamline the inference of the biochemical composition from the optical spectrum for real-time applications such as surgical monitoring. In this paper, we analyse a machine learning technique for fast and accurate inference of changes in the molecular composition of brain tissue. We reconsider and propose modifications to the existing learnable methodology based on the Beer-Lambert law, which analytically connects the spectra with concentrations. We evaluate the method's applicability to linear and non-linear formulations of the Beer-Lambert law. The approach is tested on real data obtained from the bNIRS- and HSI-based optical monitoring of brain tissue. The results demonstrate that the proposed method enables real-time molecular composition inference while maintaining the accuracy of traditional linear and non-linear optimization solvers. Preliminary findings show that Beer-Lambert law-based spectral unmixing allows us to contrast brain anatomy semantics such as the vessel tree and tumor area.
Submitted 15 August, 2024; v1 submitted 27 September, 2023; originally announced September 2023.
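The linear (modified) Beer-Lambert formulation reduces unmixing to a small least-squares problem, with the attenuation change at each wavelength a weighted sum of extinction coefficients times concentration changes, which is why a precomputed pseudoinverse gives real-time inference. A NumPy sketch with placeholder values rather than calibrated extinction spectra:

    import numpy as np

    wavelengths, chromophores = 100, 3            # e.g. HbO2, HHb, CCO
    E = np.random.rand(wavelengths, chromophores) # extinction matrix (times pathlength)
    delta_c_true = np.array([0.8, -0.3, 0.1])
    delta_A = E @ delta_c_true                    # measured attenuation change

    # offline: factorise once; online: one matrix-vector product per spectrum
    E_pinv = np.linalg.pinv(E)
    print(E_pinv @ delta_A)                       # recovers delta_c_true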
arXiv:2309.14306 (https://arxiv.org/abs/2309.14306) [pdf, other] eess.IV cs.CV
Title: DeepMesh: Mesh-based Cardiac Motion Tracking using Deep Learning
Authors: Qingjie Meng, Wenjia Bai, Declan P O'Regan, Daniel Rueckert
Abstract: 3D motion estimation from cine cardiac magnetic resonance (CMR) images is important for the assessment of cardiac function and the diagnosis of cardiovascular diseases. Current state-of-the-art methods focus on estimating dense pixel-/voxel-wise motion fields in image space, which ignores the fact that motion estimation is only relevant and useful within the anatomical objects of interest, e.g., the heart. In this work, we model the heart as a 3D mesh consisting of epi- and endocardial surfaces. We propose a novel learning framework, DeepMesh, which propagates a template heart mesh to a subject space and estimates the 3D motion of the heart mesh from CMR images for individual subjects. In DeepMesh, the heart mesh of the end-diastolic frame of an individual subject is first reconstructed from the template mesh. Mesh-based 3D motion fields with respect to the end-diastolic frame are then estimated from 2D short- and long-axis CMR images. By developing a differentiable mesh-to-image rasterizer, DeepMesh is able to leverage 2D shape information from multiple anatomical views for 3D mesh reconstruction and mesh motion estimation. The proposed method estimates vertex-wise displacement and thus maintains vertex correspondences between time frames, which is important for the quantitative assessment of cardiac function across different subjects and populations. We evaluate DeepMesh on CMR images acquired from the UK Biobank.
We focus on 3D motion estimation of the left ventricle in this work. Experimental results show that the proposed method quantitatively and qualitatively outperforms other image-based and mesh-based cardiac motion tracking methods.
Submitted 25 September, 2023; originally announced September 2023.
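The practical payoff of vertex-wise displacements is that vertex indices keep their anatomical meaning across frames; a toy NumPy illustration (all shapes and values are arbitrary):

    import numpy as np

    template = np.random.rand(1000, 3)                   # (V, 3) end-diastolic mesh
    displacements = np.random.rand(20, 1000, 3) * 0.01   # (T, V, 3), one field per frame
    meshes = template[None] + displacements              # vertex i corresponds across all T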
arXiv:2309.14198 (https://arxiv.org/abs/2309.14198) [pdf, other] cs.LG cs.CV cs.CY eess.IV
Title: (Predictable) Performance Bias in Unsupervised Anomaly Detection
Authors: Felix Meissen, Svenja Breuer, Moritz Knolle, Alena Buyx, Ruth Müller, Georgios Kaissis, Benedikt Wiestler, Daniel Rückert
Abstract: Background: With the ever-increasing amount of medical imaging data, the demand for algorithms to assist clinicians has amplified. Unsupervised anomaly detection (UAD) models promise to aid in the crucial first step of disease detection. While previous studies have thoroughly explored fairness in supervised models in healthcare, for UAD, this has so far been unexplored. Methods: In this study, we evaluated how dataset composition regarding subgroups manifests in disparate performance of UAD models along multiple protected variables on three large-scale publicly available chest X-ray datasets. Our experiments were validated using two state-of-the-art UAD models for medical images. Finally, we introduced a novel subgroup-AUROC (sAUROC) metric, which aids in quantifying fairness in machine learning. Findings: Our experiments revealed empirical "fairness laws" (similar to "scaling laws" for Transformers) for training-dataset composition: linear relationships between anomaly detection performance within a subpopulation and its representation in the training data. Our study further revealed performance disparities, even in the case of balanced training data, and compound effects that exacerbate the drop in performance for subjects associated with multiple adversely affected groups. Interpretation: Our study quantified the disparate performance of UAD models against certain demographic subgroups. Importantly, we showed that this unfairness cannot be mitigated by balanced representation alone. Instead, the representation of some subgroups seems harder to learn by UAD models than that of others. The empirical fairness laws discovered in our study make disparate performance in UAD models easier to estimate and aid in determining the most desirable dataset composition.
Submitted 25 September, 2023; originally announced September 2023.
Comments: 11 pages, 5 figures, 1 panel
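A per-subgroup AUROC in the spirit of the proposed sAUROC can be sketched with scikit-learn: score the detector separately on each protected subgroup so disparities become visible. The exact definition in the paper may differ; this is an illustrative reading.

    import numpy as np
    from sklearn.metrics import roc_auc_score

    def subgroup_auroc(scores, labels, groups):
        """scores: anomaly scores; labels: 0/1 ground truth; groups: subgroup ids."""
        return {g: roc_auc_score(labels[groups == g], scores[groups == g])
                for g in np.unique(groups)}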
arXiv:2309.08643 (https://arxiv.org/abs/2309.08643) [pdf, other] eess.IV
Title: NISF: Neural Implicit Segmentation Functions
Authors: Nil Stolt-Ansó, Julian McGinnis, Jiazhen Pan, Kerstin Hammernik, Daniel Rueckert
Abstract: Segmentation of anatomical shapes from medical images has taken an important role in the automation of clinical measurements. While typical deep-learning segmentation approaches are performed on discrete voxels, the underlying objects being analysed exist in a real-valued continuous space. Approaches that rely on convolutional neural networks (CNNs) are limited to grid-like inputs and are not easily applicable to sparse or partial measurements. We propose a novel family of image segmentation models that tackle many of CNNs' shortcomings: Neural Implicit Segmentation Functions (NISF). Our framework takes inspiration from the field of neural implicit functions, where a network learns a mapping from a real-valued coordinate space to a shape representation. NISFs have the ability to segment anatomical shapes in high-dimensional continuous spaces. Training is not limited to voxelized grids and covers applications with sparse and partial data. Interpolation between observations is learnt naturally in the training procedure and requires no post-processing. Furthermore, NISFs allow the leveraging of learnt shape priors to make predictions for regions outside of the original image plane.
We go on to show that the framework achieves Dice scores of 0.87 ± 0.045 on a (3D+t) short-axis cardiac segmentation task using the UK Biobank dataset. We also provide a qualitative analysis of our framework's ability to perform segmentation and image interpolation on unseen regions of an image volume at arbitrary resolutions.
Submitted 14 September, 2023; originally announced September 2023.
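The coordinate-based formulation is what frees NISF from voxel grids: a network maps a continuous (x, y, z, t) query plus a per-subject latent code to a foreground probability, so it can be evaluated at arbitrary resolutions and off-grid locations. A minimal sketch with assumed dimensions, not the authors' architecture:

    import torch
    import torch.nn as nn

    class NISFSketch(nn.Module):
        def __init__(self, latent_dim=64, hidden=256, coords=4):   # (x, y, z, t)
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(coords + latent_dim, hidden), nn.ReLU(),
                nn.Linear(hidden, hidden), nn.ReLU(),
                nn.Linear(hidden, 1), nn.Sigmoid(),
            )

        def forward(self, coords, z):
            # coords: (B, 4) continuous queries; z: (B, latent_dim) subject code
            return self.net(torch.cat([coords, z], dim=-1))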
arXiv:2308.16863 (https://arxiv.org/abs/2308.16863) [pdf, other] eess.IV cs.CV
Title: Self-pruning Graph Neural Network for Predicting Inflammatory Disease Activity in Multiple Sclerosis from Brain MR Images
Authors: Chinmay Prabhakar, Hongwei Bran Li, Johannes C. Paetzold, Timo Loehr, Chen Niu, Mark Mühlau, Daniel Rueckert, Benedikt Wiestler, Bjoern Menze
Abstract: Multiple Sclerosis (MS) is a severe neurological disease characterized by inflammatory lesions in the central nervous system. Hence, predicting inflammatory disease activity is crucial for disease assessment and treatment. However, MS lesions can occur throughout the brain and vary in shape, size and total count among patients. The high variance in lesion load and locations makes it challenging for machine learning methods to learn a globally effective representation of whole-brain MRI scans to assess and predict disease. Technically, it is non-trivial to incorporate essential biomarkers such as lesion load or spatial proximity. Our work represents the first attempt to utilize graph neural networks (GNNs) to aggregate these biomarkers for a novel global representation. We propose a two-stage MS inflammatory disease activity prediction approach. First, a 3D segmentation network detects lesions, and a self-supervised algorithm extracts their image features. Second, the detected lesions are used to build a patient graph. The lesions act as nodes in the graph and are initialized with image features extracted in the first stage. Finally, the lesions are connected based on their spatial proximity, and the inflammatory disease activity prediction is formulated as a graph classification task. Furthermore, we propose a self-pruning strategy to auto-select the most critical lesions for prediction. Our proposed method outperforms the existing baseline by a large margin (AUCs of 0.67 vs. 0.61 and 0.66 vs. 0.60 for one-year and two-year inflammatory disease activity, respectively). Finally, our proposed method enjoys inherent explainability by assigning an importance score to each lesion for the overall prediction. Code is available at https://github.com/chinmay5/ms_ida.git
Submitted 31 August, 2023; originally announced August 2023.
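The patient-graph construction above can be sketched without a GNN library: lesions become nodes carrying image features, edges link spatially close lesions, and one round of neighbourhood averaging stands in for message passing before a graph-level readout. The radius and shapes are illustrative assumptions.

    import numpy as np

    def lesion_graph_readout(features, centroids, radius=30.0):
        """features: (L, D) lesion embeddings; centroids: (L, 3) positions in mm."""
        d = np.linalg.norm(centroids[:, None] - centroids[None], axis=-1)
        adj = ((d < radius) & (d > 0)).astype(float)   # proximity edges
        adj += np.eye(len(features))                   # self-loops
        adj /= adj.sum(1, keepdims=True)               # row-normalise
        node_repr = adj @ features                     # one aggregation step
        return node_repr.mean(0)                       # graph-level representation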
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.14365">arXiv:2308.14365</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.14365">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s43856-024-00670-0">10.1038/s43856-024-00670-0 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Using UK Biobank data to establish population-specific atlases from whole body MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Starck%2C+S">Sophie Starck</a>, <a href="/search/eess?searchtype=author&amp;query=Sideri-Lampretsa%2C+V">Vasiliki Sideri-Lampretsa</a>, <a href="/search/eess?searchtype=author&amp;query=Ritter%2C+J+J+M">Jessica J. M. Ritter</a>, <a href="/search/eess?searchtype=author&amp;query=Zimmer%2C+V+A">Veronika A. Zimmer</a>, <a href="/search/eess?searchtype=author&amp;query=Braren%2C+R">Rickmer Braren</a>, <a href="/search/eess?searchtype=author&amp;query=Mueller%2C+T+T">Tamara T. Mueller</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Reliable reference data in medical imaging is largely unavailable. Developing tools that allow for the comparison of individual patient data to reference data has a high potential to improve diagnostic imaging. Population atlases are a commonly used tool in medical imaging to facilitate this. Constructing such atlases becomes particularly challenging when working with highly heterogeneous datasets, such as whole-body images, which contain significant anatomical variations. In this work, we propose a pipeline for generating a standardised whole-body atlas for a highly heterogeneous population by partitioning the population into anatomically meaningful subgroups. Using magnetic resonance images from the UK Biobank dataset, we create six whole-body atlases representing a healthy population average. We furthermore unbias them, thereby obtaining a realistic representation of the population. In addition to the anatomical atlases, we generate probabilistic atlases that capture the distributions of abdominal fat (visceral and subcutaneous) and five abdominal organs across the population (liver, spleen, pancreas, left and right kidneys). Our pipeline effectively generates high-quality, realistic whole-body atlases with clinical applicability. The probabilistic atlases show differences in fat distribution between subjects with medical conditions such as diabetes and cardiovascular diseases and healthy subjects in the atlas space. With this work, we make the constructed anatomical and label atlases publicly available, with the expectation that they will support medical research involving whole-body MR images. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in Communications Medicine, Nature Portfolio</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Commun Med 4(1), 237 (2024) </p> </li>
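<p>Once all subjects have been registered into a common atlas space, a probabilistic label atlas of the kind described above reduces to a voxelwise average of binary organ masks. A minimal numpy sketch, assuming the registration and unbiasing steps have already been performed (they are not shown):</p>
<pre><code class="language-python">
# Minimal sketch of a probabilistic label atlas: given binary organ masks that
# are already registered to atlas space, the voxelwise mean gives the
# probability of observing that organ at each voxel. Shapes are assumptions.
import numpy as np

def probabilistic_atlas(registered_masks):
    """registered_masks: (n_subjects, X, Y, Z) binary masks in atlas space."""
    masks = np.asarray(registered_masks, dtype=np.float64)
    return masks.mean(axis=0)                    # (X, Y, Z), values in [0, 1]

rng = np.random.default_rng(0)
masks = rng.integers(0, 2, size=(50, 8, 8, 8))   # stand-in for 50 registered liver masks
atlas = probabilistic_atlas(masks)
print(atlas.min(), atlas.max())                  # voxelwise organ probabilities
</code></pre>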
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.13861">arXiv:2308.13861</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.13861">pdf</a>, <a href="https://arxiv.org/format/2308.13861">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Bias in Unsupervised Anomaly Detection in Brain MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Bercea%2C+C+I">Cosmin I. Bercea</a>, <a href="/search/eess?searchtype=author&amp;query=Puyol-Ant%C3%B3n%2C+E">Esther Puyol-Antón</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a>, <a href="/search/eess?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Unsupervised anomaly detection methods offer a promising and flexible alternative to supervised approaches, holding the potential to revolutionize medical scan analysis and enhance diagnostic performance. In the current landscape, it is commonly assumed that differences between a test case and the training distribution are attributed solely to pathological conditions, implying that any disparity indicates an anomaly. However, the presence of other potential sources of distributional shift, including scanner, age, sex, or race, is frequently overlooked. These shifts can significantly impact the accuracy of the anomaly detection task. Prominent instances of such failures have sparked concerns regarding the bias, credibility, and fairness of anomaly detection. This work presents a novel analysis of biases in unsupervised anomaly detection. By examining potential non-pathological distributional shifts between the training and testing distributions, we shed light on the extent of these biases and their influence on anomaly detection results. Moreover, this study examines the algorithmic limitations that arise due to biases, providing valuable insights into the challenges encountered by anomaly detection algorithms in accurately learning and capturing the entire range of variability present in the normative distribution. Through this analysis, we aim to enhance the understanding of these biases and pave the way for future improvements in the field. Here, we specifically investigate Alzheimer&#39;s disease detection from brain MR imaging as a case study, revealing significant biases related to sex, race, and scanner variations that substantially impact the results. These findings align with the broader goal of improving the reliability, fairness, and effectiveness of anomaly detection in medical imaging. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.09345">arXiv:2308.09345</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.09345">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1186/s41747-023-00385-2">10.1186/s41747-023-00385-2 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Denoising diffusion-based MRI to CT image translation enables automated spinal segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Graf%2C+R">Robert Graf</a>, <a href="/search/eess?searchtype=author&amp;query=Schmitt%2C+J">Joachim Schmitt</a>, <a href="/search/eess?searchtype=author&amp;query=Schlaeger%2C+S">Sarah Schlaeger</a>, <a href="/search/eess?searchtype=author&amp;query=M%C3%B6ller%2C+H+K">Hendrik Kristian Möller</a>, <a href="/search/eess?searchtype=author&amp;query=Sideri-Lampretsa%2C+V">Vasiliki Sideri-Lampretsa</a>, <a href="/search/eess?searchtype=author&amp;query=Sekuboyina%2C+A">Anjany Sekuboyina</a>, <a href="/search/eess?searchtype=author&amp;query=Krieg%2C+S+M">Sandro Manuel Krieg</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Menze%2C+B">Bjoern Menze</a>,
<a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Kirschke%2C+J+S">Jan Stefan Kirschke</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Background: Automated segmentation of spinal MR images plays a vital role both scientifically and clinically. However, accurately delineating posterior spine structures presents challenges. Methods: This retrospective study, approved by the ethical committee, involved translating T1w and T2w MR image series into CT images in a total of n=263 pairs of CT/MR series. Landmark-based registration was performed to align image pairs. We compared 2D paired (Pix2Pix, denoising diffusion implicit models (DDIM) image mode, DDIM noise mode) and unpaired (contrastive unpaired translation, SynDiff) image-to-image translation, using &#34;peak signal-to-noise ratio&#34; (PSNR) as the quality measure. A publicly available segmentation network segmented the synthesized CT datasets, and Dice scores were evaluated on in-house test sets and the &#34;MRSpineSeg Challenge&#34; volumes. The 2D findings were extended to 3D Pix2Pix and DDIM. Results: 2D paired methods and SynDiff exhibited similar translation performance and Dice scores on paired data. DDIM image mode achieved the highest image quality. SynDiff, Pix2Pix, and DDIM image mode demonstrated similar Dice scores (0.77). For craniocaudal axis rotations, at least two landmarks per vertebra were required for registration. The 3D translation outperformed the 2D approach, resulting in improved Dice scores (0.80) and anatomically accurate segmentations at a higher resolution than the original MR image. Conclusion: Registration with two landmarks per vertebra enabled paired image-to-image translation from MR to CT and outperformed all unpaired approaches. The 3D techniques provided anatomically correct segmentations, avoiding underprediction of small structures like the spinous process. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">35 pages, 7 figures. Code and model weights available at https://doi.org/10.5281/zenodo.8221159 and https://doi.org/10.5281/zenodo.8198697</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T99 68U10 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.1 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Eur Radiol Exp 7, 70 (2023) </p> </li>
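<p>The study above scores translated CT images with the peak signal-to-noise ratio, PSNR = 10 log<sub>10</sub>(MAX&sup2;/MSE). A minimal numpy sketch; the intensity maximum <code>max_val</code> is an assumption that depends on how the images are normalised:</p>
<pre><code class="language-python">
# Minimal sketch of the PSNR quality measure used above to compare translated
# and ground-truth CT images. `max_val` is an assumption tied to the intensity
# range of the (normalised) images.
import numpy as np

def psnr(reference, test, max_val=1.0):
    mse = np.mean((np.asarray(reference, float) - np.asarray(test, float)) ** 2)
    if mse == 0:
        return np.inf                       # identical images
    return 10.0 * np.log10(max_val ** 2 / mse)

ct = np.random.rand(64, 64)                 # stand-in for a ground-truth CT slice
synthetic_ct = ct + 0.05 * np.random.randn(64, 64)
print(f"PSNR: {psnr(ct, synthetic_ct):.1f} dB")
</code></pre>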
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.07885">arXiv:2308.07885</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.07885">pdf</a>, <a href="https://arxiv.org/format/2308.07885">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> The Challenge of Fetal Cardiac MRI Reconstruction Using Deep Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Prokopenko%2C+D">Denis Prokopenko</a>, <a href="/search/eess?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a href="/search/eess?searchtype=author&amp;query=Roberts%2C+T">Thomas Roberts</a>, <a href="/search/eess?searchtype=author&amp;query=Lloyd%2C+D+F+A">David F A Lloyd</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Hajnal%2C+J+V">Joseph V Hajnal</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Dynamic free-breathing fetal cardiac MRI is one of the most challenging modalities, as it requires high temporal and spatial resolution to depict rapid changes in a small fetal heart. The ability of deep learning methods to recover undersampled data could help to optimise the kt-SENSE acquisition strategy and improve non-gated kt-SENSE reconstruction quality. In this work, we explore supervised deep learning networks for reconstruction of kt-SENSE style acquired data using an extensive in vivo dataset. Having access to fully-sampled low-resolution multi-coil fetal cardiac MRI, we study the performance of the networks to recover fully-sampled data from undersampled data. We consider model architectures together with training strategies, taking into account their application in the real clinical setup used to collect the dataset, to enable networks to recover prospectively undersampled data. We explore a set of modifications to form a baseline performance evaluation for dynamic fetal cardiac MRI on real data. We systematically evaluate the models on coil-combined data to reveal the effect of the suggested changes to the architecture in the context of fetal heart properties. We show that the best performers recover a detailed depiction of the maternal anatomy on a large scale, but the dynamic properties of the fetal heart are under-represented. Training directly on multi-coil data improves the performance of the models, allows their prospective application to undersampled data, and makes them outperform CTFNet, which was introduced for adult cardiac cine MRI. However, these models deliver similar qualitative performance, recovering the maternal body very well but underestimating the dynamic properties of the fetal heart. These fast, highly localised dynamics of the fetal heart suggest that more targeted training and evaluation methods may be needed for fetal cardiac applications. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li>
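<p>The setup above trains networks to recover fully-sampled data from retrospectively undersampled acquisitions. A minimal numpy sketch of Cartesian retrospective undersampling with a zero-filled inverse-FFT baseline; the acceleration factor and the width of the fully-sampled k-space centre are illustrative assumptions, not the kt-SENSE sampling scheme itself:</p>
<pre><code class="language-python">
# Minimal sketch of retrospective Cartesian undersampling: mask phase-encode
# lines of fully-sampled k-space, then reconstruct by zero-filled inverse FFT.
# Acceleration factor and centre width are illustrative assumptions.
import numpy as np

def undersample(image, accel=4, centre_lines=8):
    k = np.fft.fftshift(np.fft.fft2(image))              # fully-sampled k-space
    mask = np.zeros(image.shape[0], dtype=bool)
    mask[::accel] = True                                 # keep every accel-th line
    c = image.shape[0] // 2
    mask[c - centre_lines // 2 : c + centre_lines // 2] = True  # sampled centre
    k_under = k * mask[:, None]                          # zero out unsampled lines
    zero_filled = np.fft.ifft2(np.fft.ifftshift(k_under))
    return np.abs(zero_filled), mask

image = np.random.rand(128, 128)                         # stand-in for a cine frame
recon, mask = undersample(image)
</code></pre>
<p>A reconstruction network is then trained to map <code>recon</code> (or <code>k_under</code>) back to the fully-sampled target, which is what makes retrospective experiments on fully-sampled in vivo data possible.</p>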
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.05764">arXiv:2308.05764</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.05764">pdf</a>, <a href="https://arxiv.org/format/2308.05764">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Unlocking the Diagnostic Potential of ECG through Knowledge Transfer from Cardiac MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Turgut%2C+%C3%96">Özgün Turgut</a>, <a href="/search/eess?searchtype=author&amp;query=M%C3%BCller%2C+P">Philip Müller</a>, <a href="/search/eess?searchtype=author&amp;query=Hager%2C+P">Paul Hager</a>, <a href="/search/eess?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a href="/search/eess?searchtype=author&amp;query=Starck%2C+S">Sophie Starck</a>, <a href="/search/eess?searchtype=author&amp;query=Menten%2C+M+J">Martin J. Menten</a>, <a href="/search/eess?searchtype=author&amp;query=Martens%2C+E">Eimo Martens</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> The electrocardiogram (ECG) is a widely available diagnostic tool that allows for a cost-effective and fast assessment of cardiovascular health. However, more detailed examination with expensive cardiac magnetic resonance (CMR) imaging is often preferred for the diagnosis of cardiovascular diseases. While providing detailed visualization of the cardiac anatomy, CMR imaging is not widely available due to long scan times and high costs. To address this issue, we propose the first self-supervised contrastive approach that transfers domain-specific information from CMR images to ECG embeddings. Our approach combines multimodal contrastive learning with masked data modeling to enable holistic cardiac screening solely from ECG data. In extensive experiments using data from 40,044 UK Biobank subjects, we demonstrate the utility and generalizability of our method. We predict the subject-specific risk of various cardiovascular diseases and determine distinct cardiac phenotypes solely from ECG data. In a qualitative analysis, we demonstrate that our learned ECG embeddings incorporate information from CMR image regions of interest. We make our entire pipeline publicly available, including the source code and pre-trained model weights. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li>
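<p>The multimodal contrastive component described above can be illustrated with a standard InfoNCE loss that aligns ECG and CMR embeddings of the same subject while pushing apart non-matching pairs. A minimal PyTorch sketch; the embedding dimension and temperature are assumptions, and the masked-data-modeling term of the paper is omitted:</p>
<pre><code class="language-python">
# Minimal sketch of a multimodal InfoNCE objective: embeddings of the same
# subject's ECG and CMR are treated as positive pairs, all other pairings in
# the batch as negatives. Dimensions and temperature are assumptions.
import torch
import torch.nn.functional as F

def info_nce(ecg_emb, cmr_emb, temperature=0.1):
    ecg = F.normalize(ecg_emb, dim=-1)
    cmr = F.normalize(cmr_emb, dim=-1)
    logits = ecg @ cmr.t() / temperature          # (B, B) cosine similarities
    targets = torch.arange(len(ecg))              # subject i matches subject i
    return 0.5 * (F.cross_entropy(logits, targets)
                  + F.cross_entropy(logits.t(), targets))

# Stand-ins for encoder outputs of a batch of 16 paired ECG/CMR samples.
loss = info_nce(torch.randn(16, 256), torch.randn(16, 256))
</code></pre>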
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.05474">arXiv:2308.05474</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.05474">pdf</a>, <a href="https://arxiv.org/format/2308.05474">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Spatio-Temporal Encoding of Brain Dynamics with Surface Masked Autoencoders </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Dahan%2C+S">Simon Dahan</a>, <a href="/search/eess?searchtype=author&amp;query=Williams%2C+L+Z+J">Logan Z. J. Williams</a>, <a href="/search/eess?searchtype=author&amp;query=Guo%2C+Y">Yourong Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Robinson%2C+E+C">Emma C. Robinson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> The development of robust and generalisable models for encoding the spatio-temporal dynamics of human brain activity is crucial for advancing neuroscientific discoveries. However, significant individual variation in the organisation of the human cerebral cortex makes it difficult to identify population-level trends in these signals. Recently, Surface Vision Transformers (SiTs) have emerged as a promising approach for modelling cortical signals, yet they face some limitations in low-data scenarios due to the lack of inductive biases in their architecture. To address these challenges, this paper proposes the surface Masked AutoEncoder (sMAE) and video surface Masked AutoEncoder (vsMAE) for multivariate and spatio-temporal pre-training of cortical signals over regular icosahedral grids. These models are trained to reconstruct cortical feature maps from masked versions of the input by learning strong latent representations of cortical structure and function. Such representations translate into better modelling of individual phenotypes and enhanced performance in downstream tasks. The proposed approach was evaluated on cortical phenotype regression using data from the young adult Human Connectome Project (HCP) and developing HCP (dHCP).
Results show that (v)sMAE pre-trained models improve phenotyping prediction performance on multiple tasks by $\ge 26\%$, and offer faster convergence relative to models trained from scratch. Finally, we show that pre-training vision transformers on large datasets, such as the UK Biobank (UKB), supports transfer learning to low-data regimes. Our code and pre-trained models are publicly available at https://github.com/metrics-lab/surface-masked-autoencoders. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at MIDL 2024; 20 pages; 7 figures</span> </p> </li>
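<p>The masked-autoencoding objective described above can be sketched independently of the surface architecture: hide a fraction of patches, reconstruct the input, and penalise the error only on the masked positions. A minimal PyTorch sketch with a placeholder encoder-decoder and an assumed 75% mask ratio; the icosahedral patching itself is not shown:</p>
<pre><code class="language-python">
# Minimal sketch of masked-autoencoder pre-training: mask a fraction of
# (cortical) patches, reconstruct them, and compute the loss on masked
# positions only. The tiny MLP and the 75% mask ratio are stand-ins.
import torch
import torch.nn as nn

def masked_recon_loss(model, patches, mask_ratio=0.75):
    """patches: (B, N, D) feature patches, e.g. cortical icosahedral patches."""
    mask = torch.rand(patches.shape[:2]) < mask_ratio   # True = hidden from model
    visible = patches * (~mask).unsqueeze(-1)           # zero out masked patches
    recon = model(visible)                              # (B, N, D) reconstruction
    return ((recon - patches) ** 2)[mask].mean()        # penalise masked patches only

model = nn.Sequential(nn.Linear(32, 64), nn.GELU(), nn.Linear(64, 32))
loss = masked_recon_loss(model, torch.randn(4, 80, 32))
</code></pre>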
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.02493">arXiv:2308.02493</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.02493">pdf</a>, <a href="https://arxiv.org/format/2308.02493">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Body Fat Estimation from Surface Meshes using Graph Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Mueller%2C+T+T">Tamara T. Mueller</a>, <a href="/search/eess?searchtype=author&amp;query=Zhou%2C+S">Siyu Zhou</a>, <a href="/search/eess?searchtype=author&amp;query=Starck%2C+S">Sophie Starck</a>, <a href="/search/eess?searchtype=author&amp;query=Jungmann%2C+F">Friederike Jungmann</a>, <a href="/search/eess?searchtype=author&amp;query=Ziller%2C+A">Alexander Ziller</a>, <a href="/search/eess?searchtype=author&amp;query=Aksoy%2C+O">Orhun Aksoy</a>, <a href="/search/eess?searchtype=author&amp;query=Movchan%2C+D">Danylo Movchan</a>, <a href="/search/eess?searchtype=author&amp;query=Braren%2C+R">Rickmer Braren</a>, <a href="/search/eess?searchtype=author&amp;query=Kaissis%2C+G">Georgios Kaissis</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Body fat volume and distribution can be a strong indicator of a person&#39;s overall health and of the risk of developing diseases such as type 2 diabetes and cardiovascular disease. Frequently used measures for fat estimation are the body mass index (BMI), waist circumference, or the waist-hip-ratio. However, those are rather imprecise measures that do not allow for a discrimination between different types of fat or between fat and muscle tissue. The estimation of visceral (VAT) and abdominal subcutaneous (ASAT) adipose tissue volume has been shown to be a more accurate measure of these risk factors. In this work, we show that triangulated body surface meshes can be used to accurately predict VAT and ASAT volumes using graph neural networks. Our methods achieve high performance while reducing training time and required resources compared to state-of-the-art convolutional neural networks in this area. We furthermore envision this method to be applicable to cheaper and easily accessible medical surface scans instead of expensive medical images. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.01318">arXiv:2308.01318</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.01318">pdf</a>, <a href="https://arxiv.org/format/2308.01318">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Framing image registration as a landmark detection problem for label-noise-aware task representation (HitR) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Waldmannstetter%2C+D">Diana Waldmannstetter</a>, <a href="/search/eess?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Campi%2C+F">Francesco Campi</a>, <a href="/search/eess?searchtype=author&amp;query=Kukuljan%2C+I">Ivan Kukuljan</a>, <a href="/search/eess?searchtype=author&amp;query=Ehrlich%2C+S">Stefan Ehrlich</a>, <a href="/search/eess?searchtype=author&amp;query=Vinayahalingam%2C+S">Shankeeth Vinayahalingam</a>, <a href="/search/eess?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/eess?searchtype=author&amp;query=Chakrabarty%2C+S">Satrajit Chakrabarty</a>, <a href="/search/eess?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/eess?searchtype=author&amp;query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/eess?searchtype=author&amp;query=Schwarting%2C+J">Julian Schwarting</a>, <a href="/search/eess?searchtype=author&amp;query=Metz%2C+M">Marie Metz</a>, <a href="/search/eess?searchtype=author&amp;query=Kirschke%2C+J+S">Jan S. Kirschke</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Heckemann%2C+R+A">Rolf A. Heckemann</a>, <a href="/search/eess?searchtype=author&amp;query=Piraud%2C+M">Marie Piraud</a>, <a href="/search/eess?searchtype=author&amp;query=Menze%2C+B+H">Bjoern H. Menze</a>, <a href="/search/eess?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Accurate image registration is pivotal in biomedical image analysis, where selecting suitable registration algorithms demands careful consideration. While numerous algorithms are available, the evaluation metrics to assess their performance have remained relatively static.
This study addresses this challenge by introducing a novel evaluation metric termed Landmark Hit Rate (HitR), which focuses on the clinical relevance of image registration accuracy. Unlike traditional metrics such as Target Registration Error, which emphasize subresolution differences, HitR considers whether registration algorithms successfully position landmarks within defined confidence zones. This paradigm shift acknowledges the inherent annotation noise in medical images, allowing for more meaningful assessments. To equip HitR with label-noise-awareness, we propose defining these confidence zones based on an Inter-rater Variance analysis. Consequently, hit rate curves are computed for varying landmark zone sizes, enabling performance measurement for a task-specific level of accuracy. Our approach offers a more realistic and meaningful assessment of image registration algorithms, reflecting their suitability for clinical and biomedical applications. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li>
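<p>The Landmark Hit Rate described above is simple to state: a landmark counts as a hit if the registration places it within a confidence zone around its reference position, and sweeping the zone radius yields a hit-rate curve. A minimal numpy sketch; the radii below are illustrative stand-ins for the inter-rater-derived zone sizes:</p>
<pre><code class="language-python">
# Minimal sketch of a landmark hit rate: the fraction of landmarks placed
# within a confidence zone of radius `radius` around the reference position.
# The radii swept here are illustrative, not the inter-rater-derived zones.
import numpy as np

def hit_rate(warped_pts, reference_pts, radius):
    dist = np.linalg.norm(warped_pts - reference_pts, axis=1)
    return float(np.mean(dist <= radius))

reference = np.random.rand(30, 3) * 10           # annotated landmark positions (mm)
warped = reference + np.random.randn(30, 3)      # positions after registration
curve = {r: hit_rate(warped, reference, r) for r in (1.0, 2.0, 3.0, 5.0)}
print(curve)                                     # hit-rate curve over zone sizes
</code></pre>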
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.00402">arXiv:2308.00402</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.00402">pdf</a>, <a href="https://arxiv.org/format/2308.00402">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Metrics to Quantify Global Consistency in Synthetic Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Scholz%2C+D">Daniel Scholz</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Menten%2C+M+J">Martin J. Menten</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Image synthesis is increasingly being adopted in medical image processing, for example for data augmentation or inter-modality image translation. In these critical applications, the generated images must fulfill a high standard of biological correctness. A particular requirement for these images is global consistency, i.e., an image being overall coherent and structured so that all parts of the image fit together in a realistic and meaningful way. Yet, established image quality metrics do not explicitly quantify this property of synthetic images. In this work, we introduce two metrics that can measure the global consistency of synthetic images on a per-image basis. To measure the global consistency, we presume that a realistic image exhibits consistent properties, e.g., a person&#39;s body fat in a whole-body MRI, throughout the depicted object or scene. Hence, we quantify global consistency by predicting and comparing explicit attributes of images on patches using supervised trained neural networks. Next, we adapt this strategy to an unlabeled setting by measuring the similarity of implicit image features predicted by a self-supervised trained network. Our results demonstrate that predicting explicit attributes of synthetic images on patches can distinguish globally consistent from inconsistent images. Implicit representations of images are less sensitive for assessing global consistency but remain serviceable when labeled data is unavailable. Compared to established metrics, such as the FID, our method can explicitly measure global consistency on a per-image basis, enabling a dedicated analysis of the biological plausibility of single synthetic images. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li>
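<p>The implicit variant of the metrics described above can be sketched as follows: extract features from patches of the image and score the image by the mean pairwise similarity of its patch features, so that globally consistent images score higher. A minimal PyTorch sketch with an untrained stand-in encoder in place of the paper's self-supervised network:</p>
<pre><code class="language-python">
# Minimal sketch of a patch-similarity consistency score: tile the image into
# patches, embed each patch, and average the pairwise cosine similarities.
# The tiny conv encoder and patch size are illustrative stand-ins.
import torch
import torch.nn as nn
import torch.nn.functional as F

def global_consistency(image, encoder, patch=32):
    """image: (C, H, W); returns mean off-diagonal patch-feature similarity."""
    tiles = image.unfold(1, patch, patch).unfold(2, patch, patch)
    tiles = tiles.reshape(image.shape[0], -1, patch, patch).permute(1, 0, 2, 3)
    feats = F.normalize(encoder(tiles).flatten(1), dim=1)    # (n_patches, d)
    sim = feats @ feats.t()
    off_diag = sim[~torch.eye(len(feats), dtype=torch.bool)]
    return off_diag.mean().item()

encoder = nn.Sequential(nn.Conv2d(1, 8, 3, 2, 1), nn.ReLU(), nn.AdaptiveAvgPool2d(1))
score = global_consistency(torch.randn(1, 128, 128), encoder)
</code></pre>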
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.12672">arXiv:2307.12672</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.12672">pdf</a>, <a href="https://arxiv.org/format/2307.12672">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Global k-Space Interpolation for Dynamic MRI Reconstruction using Masked Image Modeling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Pan%2C+J">Jiazhen Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a href="/search/eess?searchtype=author&amp;query=Turgut%2C+%C3%96">Özgün Turgut</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+W">Wenqi Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+H+B">Hongwei Bran Li</a>, <a href="/search/eess?searchtype=author&amp;query=Stolt-Ans%C3%B3%2C+N">Nil Stolt-Ansó</a>, <a href="/search/eess?searchtype=author&amp;query=K%C3%BCstner%2C+T">Thomas Küstner</a>, <a href="/search/eess?searchtype=author&amp;query=Hammernik%2C+K">Kerstin Hammernik</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> In dynamic Magnetic Resonance Imaging (MRI), k-space is typically undersampled due to limited scan time, resulting in aliasing artifacts in the image domain. Hence, dynamic MR reconstruction requires not only modeling spatial frequency components in the x and y directions of k-space but also considering temporal redundancy. Most previous works rely on image-domain regularizers (priors) to conduct MR reconstruction. In contrast, we focus on interpolating the undersampled k-space before obtaining images with the Fourier transform. In this work, we connect masked image modeling with k-space interpolation and propose a novel Transformer-based k-space Global Interpolation Network, termed k-GIN. Our k-GIN learns global dependencies among low- and high-frequency components of 2D+t k-space and uses them to interpolate unsampled data.
Further, we propose a novel k-space Iterative Refinement Module (k-IRM) to enhance the learning of high-frequency components. We evaluate our approach on 92 in-house 2D+t cardiac MR subjects and compare it to MR reconstruction methods with image-domain regularizers. Experiments show that our proposed k-space interpolation method quantitatively and qualitatively outperforms baseline methods. Importantly, the proposed approach achieves substantially higher robustness and generalizability in cases of highly undersampled MR data. For the video presentation, poster, GIF results and code, please see our project page: https://jzpeterpan.github.io/k-gin.github.io/. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.11870">arXiv:2307.11870</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.11870">pdf</a>, <a href="https://arxiv.org/format/2307.11870">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> </div> </div> <p class="title is-5 mathjax"> Conditional Temporal Attention Networks for Neonatal Cortical Surface Reconstruction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Ma%2C+Q">Qiang Ma</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+L">Liu Li</a>, <a href="/search/eess?searchtype=author&amp;query=Kyriakopoulou%2C+V">Vanessa Kyriakopoulou</a>, <a href="/search/eess?searchtype=author&amp;query=Hajnal%2C+J">Joseph Hajnal</a>, <a href="/search/eess?searchtype=author&amp;query=Robinson%2C+E+C">Emma C. Robinson</a>, <a href="/search/eess?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Cortical surface reconstruction plays a fundamental role in modeling the rapid brain development during the perinatal period. In this work, we propose the Conditional Temporal Attention Network (CoTAN), a fast end-to-end framework for diffeomorphic neonatal cortical surface reconstruction.
CoTAN predicts multi-resolution stationary velocity fields (SVFs) from neonatal brain magnetic resonance images (MRI). Instead of integrating multiple SVFs, CoTAN introduces attention mechanisms to learn a conditional time-varying velocity field (CTVF) by computing the weighted sum of all SVFs at each integration step. The importance of each SVF, which is estimated by learned attention maps, is conditioned on the age of the neonates and varies with the time step of integration. The proposed CTVF defines a diffeomorphic surface deformation, which reduces mesh self-intersection errors effectively. It requires only 0.21 seconds to deform an initial template mesh to cortical white matter and pial surfaces for each brain hemisphere. CoTAN is validated on the Developing Human Connectome Project (dHCP) dataset with 877 3D brain MR images acquired from preterm and term-born neonates. Compared to state-of-the-art baselines, CoTAN achieves superior performance with only 0.12mm geometric error and 0.07% self-intersecting faces. The visualization of our attention maps illustrates that CoTAN indeed learns coarse-to-fine surface deformations automatically without intermediate supervision. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by the 26th International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI 2023)</span> </p> </li>
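<p>The conditional time-varying velocity field described above admits a compact sketch: at each integration step, the deformation velocity is an attention-weighted sum of stationary velocity fields, with weights conditioned on the neonate's age and on the step. A minimal PyTorch sketch using plain Euler integration; the attention net, field count, and step count are illustrative assumptions, not CoTAN's architecture:</p>
<pre><code class="language-python">
# Minimal sketch of a conditional time-varying velocity field: the velocity at
# each Euler step is an attention-weighted sum of M stationary velocity fields
# (SVFs), with weights conditioned on (age, step). All components are stand-ins.
import torch
import torch.nn as nn

class CTVF(nn.Module):
    def __init__(self, n_svf=4, steps=10):
        super().__init__()
        self.steps = steps
        self.attn = nn.Linear(2, n_svf)                 # input: (age, t)

    def forward(self, points, svfs, age):
        """points: (N, 3); svfs: list of M functions mapping (N, 3) -> (N, 3)."""
        for s in range(self.steps):
            t = torch.tensor([age, s / self.steps])
            w = torch.softmax(self.attn(t), dim=-1)     # (M,) attention weights
            v = sum(wi * f(points) for wi, f in zip(w, svfs))
            points = points + v / self.steps            # one Euler integration step
        return points

# Stand-in SVFs: constant fields of different magnitudes.
svfs = [lambda p, a=0.1 * i: a * torch.ones_like(p) for i in range(4)]
warped = CTVF()(torch.rand(100, 3), svfs, age=0.8)
</code></pre>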
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.07439">arXiv:2307.07439</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.07439">pdf</a>, <a href="https://arxiv.org/format/2307.07439">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.59275/j.melba.2024-682e">10.59275/j.melba.2024-682e <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Atlas-Based Interpretable Age Prediction In Whole-Body MR Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Starck%2C+S">Sophie Starck</a>, <a href="/search/eess?searchtype=author&amp;query=Kini%2C+Y+V">Yadunandan Vivekanand Kini</a>, <a href="/search/eess?searchtype=author&amp;query=Ritter%2C+J+J+M">Jessica Johanna Maria Ritter</a>, <a href="/search/eess?searchtype=author&amp;query=Braren%2C+R">Rickmer Braren</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Mueller%2C+T">Tamara Mueller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Age prediction is an important part of medical assessments and research. It can aid in detecting diseases as well as abnormal ageing by highlighting potential discrepancies between chronological and biological age. To improve understanding of age-related changes in various body parts, we investigate the ageing of the human body on a large scale by using whole-body 3D images. We utilise the Grad-CAM method to determine the body areas most predictive of a person&#39;s age. In order to expand our analysis beyond individual subjects, we employ registration techniques to generate population-wide importance maps that show the most predictive areas in the body for a whole cohort of subjects. We show that the investigation of the full 3D volume of the whole body and the population-wide analysis can give important insights into which body parts play the most important roles in predicting a person&#39;s age. Our findings reveal three primary areas of interest: the spine, the autochthonous back muscles, and the cardiac region, which exhibits the highest importance. Finally, we investigate differences between subjects that show accelerated and decelerated ageing. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at the Journal of Machine Learning for Biomedical Imaging (MELBA) https://melba-journal.org/2024:029</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine Learning for Biomedical Imaging 2 (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.06614">arXiv:2307.06614</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.06614">pdf</a>, <a href="https://arxiv.org/format/2307.06614">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Interpretable 2D Vision Models for 3D Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Ziller%2C+A">Alexander Ziller</a>, <a href="/search/eess?searchtype=author&amp;query=Erdur%2C+A+C">Ayhan Can Erdur</a>, <a href="/search/eess?searchtype=author&amp;query=Trigui%2C+M">Marwa Trigui</a>, <a href="/search/eess?searchtype=author&amp;query=G%C3%BCvenir%2C+A">Alp Güvenir</a>,
Philip Müller, Friederike Jungmann, Johannes Brandt, Jan Peeken, Rickmer Braren, Daniel Rueckert, Georgios Kaissis
Abstract: Training Artificial Intelligence (AI) models on 3D images presents unique challenges compared to the 2D case: firstly, the demand for computational resources is significantly higher, and secondly, the availability of large datasets for pre-training is often limited, impeding training success. This study proposes a simple approach of adapting 2D networks with an intermediate feature representation for processing 3D images. Our method employs attention pooling to learn to assign each slice an importance weight and, by that, obtain a weighted average of all 2D slices. These weights directly quantify the contribution of each slice to the prediction and thus make the model prediction inspectable. We show, on all 3D MedMNIST datasets as benchmarks and on two real-world datasets consisting of several hundred high-resolution CT or MRI scans, that our approach performs on par with existing methods. Furthermore, we compare the in-built interpretability of our approach to HiResCAM, a state-of-the-art retrospective interpretability approach.
Submitted 5 December, 2023; v1 submitted 13 July, 2023; originally announced July 2023.
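The attention pooling described here is concrete enough to sketch. The module below is a minimal, hypothetical PyTorch version (the `encoder` backbone and the dimension names are assumptions, not the authors' code): it scores each slice, normalises the scores into weights with a softmax, and returns both the weighted average and the per-slice weights that make the prediction inspectable.

```python
import torch
import torch.nn as nn

class SliceAttentionPool(nn.Module):
    """Pool per-slice 2D features into one volume-level feature vector."""

    def __init__(self, encoder: nn.Module, feat_dim: int):
        super().__init__()
        self.encoder = encoder               # 2D backbone: (N, C, H, W) -> (N, D)
        self.score = nn.Linear(feat_dim, 1)  # one attention logit per slice

    def forward(self, volume):               # volume: (B, S, C, H, W)
        b, s = volume.shape[:2]
        feats = self.encoder(volume.flatten(0, 1)).view(b, s, -1)  # (B, S, D)
        weights = torch.softmax(self.score(feats), dim=1)          # (B, S, 1)
        pooled = (weights * feats).sum(dim=1)                      # (B, D)
        # The per-slice weights expose how much each slice contributed.
        return pooled, weights.squeeze(-1)
```

A prediction head (e.g. a linear classifier) would consume `pooled`, while `weights` can be read off directly at inference time, in contrast to retrospective methods such as HiResCAM.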
arXiv:2306.12242 (https://arxiv.org/abs/2306.12242) [pdf, other] eess.IV cs.CV cs.LG
doi: 10.1109/TMI.2023.3287361
Title: Concurrent ischemic lesion age estimation and segmentation of CT brain using a Transformer-based network
Authors: Adam Marcus, Paul Bentley, Daniel Rueckert
Abstract: The cornerstone of stroke care is expedient management that varies depending on the time since stroke onset. Consequently, clinical decision making is centered on accurate knowledge of timing and often requires a radiologist to interpret Computed Tomography (CT) of the brain to confirm the occurrence and age of an event. These tasks are particularly challenging due to the subtle expression of acute ischemic lesions and the dynamic nature of their appearance. Automation efforts have not yet applied deep learning to estimate lesion age and have treated these two tasks independently, and so have overlooked their inherent complementary relationship. To leverage this, we propose a novel end-to-end multi-task transformer-based network optimized for concurrent segmentation and age estimation of cerebral ischemic lesions. By utilizing gated positional self-attention and CT-specific data augmentation, the proposed method can capture long-range spatial dependencies while maintaining its ability to be trained from scratch under the low-data regimes commonly found in medical imaging.
Furthermore, to better combine multiple predictions, we incorporate uncertainty by utilizing a quantile loss to facilitate estimating a probability density function of lesion age. The effectiveness of our model is then extensively evaluated on a clinical dataset consisting of 776 CT images from two medical centers. Experimental results demonstrate that our method obtains promising performance, with an area under the curve (AUC) of 0.933 for classifying lesion ages <= 4.5 hours, compared to 0.858 using a conventional approach, and outperforms task-specific state-of-the-art algorithms.
Submitted 21 June, 2023; originally announced June 2023.
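The quantile loss mentioned in this abstract is the standard pinball loss: training a head to output several quantiles of lesion age yields an approximate distribution rather than a point estimate. A minimal sketch, with illustrative quantile levels:

```python
import torch

def quantile_loss(pred, target, quantiles=(0.1, 0.5, 0.9)):
    """Pinball loss. pred: (B, Q) predicted age quantiles; target: (B,)."""
    q = torch.tensor(quantiles, device=pred.device)   # (Q,)
    err = target.unsqueeze(1) - pred                  # (B, Q)
    # Under-prediction is penalised with weight q, over-prediction with 1 - q,
    # so each output column converges to the corresponding quantile.
    return torch.max(q * err, (q - 1.0) * err).mean()
```

Predicting a denser grid of quantiles would approximate the full conditional distribution of lesion age, which is presumably what the abstract means by estimating a probability density function.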