CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;11 of 11 results for author: <span class="mathjax">Mazher, M</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/eess" aria-role="search"> Searching in archive <strong>eess</strong>. <a href="/search/?searchtype=author&amp;query=Mazher%2C+M">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Mazher, M"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Mazher%2C+M&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Mazher, M"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.05330">arXiv:2502.05330</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.05330">pdf</a>, <a href="https://arxiv.org/format/2502.05330">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Multi-Class Segmentation of Aortic Branches and Zones in Computed Tomography Angiography: The AortaSeg24 Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Imran%2C+M">Muhammad Imran</a>, <a href="/search/eess?searchtype=author&amp;query=Krebs%2C+J+R">Jonathan R. Krebs</a>, <a href="/search/eess?searchtype=author&amp;query=Sivaraman%2C+V+B">Vishal Balaji Sivaraman</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+T">Teng Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Kumar%2C+A">Amarjeet Kumar</a>, <a href="/search/eess?searchtype=author&amp;query=Ueland%2C+W+R">Walker R. Ueland</a>, <a href="/search/eess?searchtype=author&amp;query=Fassler%2C+M+J">Michael J. Fassler</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+J">Jinlong Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Sun%2C+X">Xiao Sun</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+L">Lisheng Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Shi%2C+P">Pengcheng Shi</a>, <a href="/search/eess?searchtype=author&amp;query=Rokuss%2C+M">Maximilian Rokuss</a>, <a href="/search/eess?searchtype=author&amp;query=Baumgartner%2C+M">Michael Baumgartner</a>, <a href="/search/eess?searchtype=author&amp;query=Kirchhof%2C+Y">Yannick Kirchhof</a>, <a href="/search/eess?searchtype=author&amp;query=Maier-Hein%2C+K+H">Klaus H. Maier-Hein</a>, <a href="/search/eess?searchtype=author&amp;query=Isensee%2C+F">Fabian Isensee</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+S">Shuolin Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Han%2C+B">Bing Han</a>, <a href="/search/eess?searchtype=author&amp;query=Nguyen%2C+B+T">Bong Thanh Nguyen</a>, <a href="/search/eess?searchtype=author&amp;query=Shin%2C+D">Dong-jin Shin</a>, <a href="/search/eess?searchtype=author&amp;query=Ji-Woo%2C+P">Park Ji-Woo</a>, <a href="/search/eess?searchtype=author&amp;query=Choi%2C+M">Mathew Choi</a>, <a href="/search/eess?searchtype=author&amp;query=Uhm%2C+K">Kwang-Hyun Uhm</a>, <a href="/search/eess?searchtype=author&amp;query=Ko%2C+S">Sung-Jea Ko</a>, <a href="/search/eess?searchtype=author&amp;query=Lee%2C+C">Chanwoong Lee</a> , et al. (38 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.05330v1-abstract-short" style="display: inline;"> Multi-class segmentation of the aorta in computed tomography angiography (CTA) scans is essential for diagnosing and planning complex endovascular treatments for patients with aortic dissections. However, existing methods reduce aortic segmentation to a binary problem, limiting their ability to measure diameters across different branches and zones. Furthermore, no open-source dataset is currently&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05330v1-abstract-full').style.display = 'inline'; document.getElementById('2502.05330v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.05330v1-abstract-full" style="display: none;"> Multi-class segmentation of the aorta in computed tomography angiography (CTA) scans is essential for diagnosing and planning complex endovascular treatments for patients with aortic dissections. However, existing methods reduce aortic segmentation to a binary problem, limiting their ability to measure diameters across different branches and zones. Furthermore, no open-source dataset is currently available to support the development of multi-class aortic segmentation methods. To address this gap, we organized the AortaSeg24 MICCAI Challenge, introducing the first dataset of 100 CTA volumes annotated for 23 clinically relevant aortic branches and zones. This dataset was designed to facilitate both model development and validation. The challenge attracted 121 teams worldwide, with participants leveraging state-of-the-art frameworks such as nnU-Net and exploring novel techniques, including cascaded models, data augmentation strategies, and custom loss functions. We evaluated the submitted algorithms using the Dice Similarity Coefficient (DSC) and Normalized Surface Distance (NSD), highlighting the approaches adopted by the top five performing teams. This paper presents the challenge design, dataset details, evaluation metrics, and an in-depth analysis of the top-performing algorithms. The annotated dataset, evaluation code, and implementations of the leading methods are publicly available to support further research. All resources can be accessed at https://aortaseg24.grand-challenge.org. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05330v1-abstract-full').style.display = 'none'; document.getElementById('2502.05330v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.15588">arXiv:2501.15588</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.15588">pdf</a>, <a href="https://arxiv.org/format/2501.15588">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Tumor Detection, Segmentation and Classification Challenge on Automated 3D Breast Ultrasound: The TDSC-ABUS Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Luo%2C+G">Gongning Luo</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+M">Mingwang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+H">Hongyu Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Liang%2C+X">Xinjie Liang</a>, <a href="/search/eess?searchtype=author&amp;query=Tao%2C+X">Xing Tao</a>, <a href="/search/eess?searchtype=author&amp;query=Ni%2C+D">Dong Ni</a>, <a href="/search/eess?searchtype=author&amp;query=Jeong%2C+H">Hyunsu Jeong</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+C">Chulhong Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Stock%2C+R">Raphael Stock</a>, <a href="/search/eess?searchtype=author&amp;query=Baumgartner%2C+M">Michael Baumgartner</a>, <a href="/search/eess?searchtype=author&amp;query=Kirchhoff%2C+Y">Yannick Kirchhoff</a>, <a href="/search/eess?searchtype=author&amp;query=Rokuss%2C+M">Maximilian Rokuss</a>, <a href="/search/eess?searchtype=author&amp;query=Maier-Hein%2C+K">Klaus Maier-Hein</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+Z">Zhikai Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Fan%2C+T">Tianyu Fan</a>, <a href="/search/eess?searchtype=author&amp;query=Boutry%2C+N">Nicolas Boutry</a>, <a href="/search/eess?searchtype=author&amp;query=Tereshchenko%2C+D">Dmitry Tereshchenko</a>, <a href="/search/eess?searchtype=author&amp;query=Moine%2C+A">Arthur Moine</a>, <a href="/search/eess?searchtype=author&amp;query=Charmetant%2C+M">Maximilien Charmetant</a>, <a href="/search/eess?searchtype=author&amp;query=Sauer%2C+J">Jan Sauer</a>, <a href="/search/eess?searchtype=author&amp;query=Du%2C+H">Hao Du</a>, <a href="/search/eess?searchtype=author&amp;query=Bai%2C+X">Xiang-Hui Bai</a>, <a href="/search/eess?searchtype=author&amp;query=Raikar%2C+V+P">Vipul Pai Raikar</a>, <a href="/search/eess?searchtype=author&amp;query=Montoya-del-Angel%2C+R">Ricardo Montoya-del-Angel</a>, <a href="/search/eess?searchtype=author&amp;query=Marti%2C+R">Robert Marti</a> , et al. (12 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.15588v1-abstract-short" style="display: inline;"> Breast cancer is one of the most common causes of death among women worldwide. Early detection helps in reducing the number of deaths. Automated 3D Breast Ultrasound (ABUS) is a newer approach for breast screening, which has many advantages over handheld mammography such as safety, speed, and higher detection rate of breast cancer. Tumor detection, segmentation, and classification are key componen&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.15588v1-abstract-full').style.display = 'inline'; document.getElementById('2501.15588v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.15588v1-abstract-full" style="display: none;"> Breast cancer is one of the most common causes of death among women worldwide. Early detection helps in reducing the number of deaths. Automated 3D Breast Ultrasound (ABUS) is a newer approach for breast screening, which has many advantages over handheld mammography such as safety, speed, and higher detection rate of breast cancer. Tumor detection, segmentation, and classification are key components in the analysis of medical images, especially challenging in the context of 3D ABUS due to the significant variability in tumor size and shape, unclear tumor boundaries, and a low signal-to-noise ratio. The lack of publicly accessible, well-labeled ABUS datasets further hinders the advancement of systems for breast tumor analysis. Addressing this gap, we have organized the inaugural Tumor Detection, Segmentation, and Classification Challenge on Automated 3D Breast Ultrasound 2023 (TDSC-ABUS2023). This initiative aims to spearhead research in this field and create a definitive benchmark for tasks associated with 3D ABUS image analysis. In this paper, we summarize the top-performing algorithms from the challenge and provide critical analysis for ABUS image examination. We offer the TDSC-ABUS challenge as an open-access platform at https://tdsc-abus2023.grand-challenge.org/ to benchmark and inspire future developments in algorithmic research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.15588v1-abstract-full').style.display = 'none'; document.getElementById('2501.15588v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09593">arXiv:2411.09593</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09593">pdf</a>, <a href="https://arxiv.org/format/2411.09593">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SMILE-UHURA Challenge -- Small Vessel Segmentation at Mesoscopic Scale from Ultra-High Resolution 7T Magnetic Resonance Angiograms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Chatterjee%2C+S">Soumick Chatterjee</a>, <a href="/search/eess?searchtype=author&amp;query=Mattern%2C+H">Hendrik Mattern</a>, <a href="/search/eess?searchtype=author&amp;query=D%C3%B6rner%2C+M">Marc D枚rner</a>, <a href="/search/eess?searchtype=author&amp;query=Sciarra%2C+A">Alessandro Sciarra</a>, <a href="/search/eess?searchtype=author&amp;query=Dubost%2C+F">Florian Dubost</a>, <a href="/search/eess?searchtype=author&amp;query=Schnurre%2C+H">Hannes Schnurre</a>, <a href="/search/eess?searchtype=author&amp;query=Khatun%2C+R">Rupali Khatun</a>, <a href="/search/eess?searchtype=author&amp;query=Yu%2C+C">Chun-Chih Yu</a>, <a href="/search/eess?searchtype=author&amp;query=Hsieh%2C+T">Tsung-Lin Hsieh</a>, <a href="/search/eess?searchtype=author&amp;query=Tsai%2C+Y">Yi-Shan Tsai</a>, <a href="/search/eess?searchtype=author&amp;query=Fang%2C+Y">Yi-Zeng Fang</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+Y">Yung-Ching Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+J">Juinn-Dar Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+M">Marshall Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+S">Siyu Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Ribeiro%2C+F+L">Fernanda L. Ribeiro</a>, <a href="/search/eess?searchtype=author&amp;query=Bollmann%2C+S">Saskia Bollmann</a>, <a href="/search/eess?searchtype=author&amp;query=Chintalapati%2C+K+V">Karthikesh Varma Chintalapati</a>, <a href="/search/eess?searchtype=author&amp;query=Radhakrishna%2C+C+M">Chethan Mysuru Radhakrishna</a>, <a href="/search/eess?searchtype=author&amp;query=Kumara%2C+S+C+H+R">Sri Chandana Hudukula Ram Kumara</a>, <a href="/search/eess?searchtype=author&amp;query=Sutrave%2C+R">Raviteja Sutrave</a>, <a href="/search/eess?searchtype=author&amp;query=Qayyum%2C+A">Abdul Qayyum</a>, <a href="/search/eess?searchtype=author&amp;query=Mazher%2C+M">Moona Mazher</a>, <a href="/search/eess?searchtype=author&amp;query=Razzak%2C+I">Imran Razzak</a>, <a href="/search/eess?searchtype=author&amp;query=Rodero%2C+C">Cristobal Rodero</a> , et al. (23 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09593v1-abstract-short" style="display: inline;"> The human brain receives nutrients and oxygen through an intricate network of blood vessels. Pathology affecting small vessels, at the mesoscopic scale, represents a critical vulnerability within the cerebral blood supply and can lead to severe conditions, such as Cerebral Small Vessel Diseases. The advent of 7 Tesla MRI systems has enabled the acquisition of higher spatial resolution images, maki&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09593v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09593v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09593v1-abstract-full" style="display: none;"> The human brain receives nutrients and oxygen through an intricate network of blood vessels. Pathology affecting small vessels, at the mesoscopic scale, represents a critical vulnerability within the cerebral blood supply and can lead to severe conditions, such as Cerebral Small Vessel Diseases. The advent of 7 Tesla MRI systems has enabled the acquisition of higher spatial resolution images, making it possible to visualise such vessels in the brain. However, the lack of publicly available annotated datasets has impeded the development of robust, machine learning-driven segmentation algorithms. To address this, the SMILE-UHURA challenge was organised. This challenge, held in conjunction with the ISBI 2023, in Cartagena de Indias, Colombia, aimed to provide a platform for researchers working on related topics. The SMILE-UHURA challenge addresses the gap in publicly available annotated datasets by providing an annotated dataset of Time-of-Flight angiography acquired with 7T MRI. This dataset was created through a combination of automated pre-segmentation and extensive manual refinement. In this manuscript, sixteen submitted methods and two baseline methods are compared both quantitatively and qualitatively on two different datasets: held-out test MRAs from the same dataset as the training data (with labels kept secret) and a separate 7T ToF MRA dataset where both input volumes and labels are kept secret. The results demonstrate that most of the submitted deep learning methods, trained on the provided training dataset, achieved reliable segmentation performance. Dice scores reached up to 0.838 $\pm$ 0.066 and 0.716 $\pm$ 0.125 on the respective datasets, with an average performance of up to 0.804 $\pm$ 0.15. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09593v1-abstract-full').style.display = 'none'; document.getElementById('2411.09593v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.17518">arXiv:2405.17518</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.17518">pdf</a>, <a href="https://arxiv.org/format/2405.17518">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Assessment of Left Atrium Motion Deformation Through Full Cardiac Cycle </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Qayyum%2C+A">Abdul Qayyum</a>, <a href="/search/eess?searchtype=author&amp;query=Mazher%2C+M">Moona Mazher</a>, <a href="/search/eess?searchtype=author&amp;query=Lee%2C+A">Angela Lee</a>, <a href="/search/eess?searchtype=author&amp;query=Solis-Lemus%2C+J+A">Jose A Solis-Lemus</a>, <a href="/search/eess?searchtype=author&amp;query=Razzak%2C+I">Imran Razzak</a>, <a href="/search/eess?searchtype=author&amp;query=Niederer%2C+S+A">Steven A Niederer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.17518v1-abstract-short" style="display: inline;"> Unlike Right Atrium (RA), Left Atrium (LA) presents distinctive challenges, including much thinner myocardial walls, complex and irregular morphology, as well as diversity in individual&#39;s structure, making off-the-shelf methods designed for the Left Ventricle (LV) may not work in the context of the left atrium. To overcome aforementioned challenges, we are the first to present comprehensive techni&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.17518v1-abstract-full').style.display = 'inline'; document.getElementById('2405.17518v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.17518v1-abstract-full" style="display: none;"> Unlike Right Atrium (RA), Left Atrium (LA) presents distinctive challenges, including much thinner myocardial walls, complex and irregular morphology, as well as diversity in individual&#39;s structure, making off-the-shelf methods designed for the Left Ventricle (LV) may not work in the context of the left atrium. To overcome aforementioned challenges, we are the first to present comprehensive technical workflow designed for 4D registration modeling to automatically analyze LA motion using high-resolution 3D Cine MR images. We integrate segmentation network and 4D registration process to precisely delineate LA segmentation throughout the full cardiac cycle. Additionally, an image 4D registration network is employed to extract LA displacement vector fields (DVFs). Our findings show the potential of proposed end to end framework in providing clinicians with novel regional biomarkers for left atrium motion tracking and deformation, carrying significant clinical implications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.17518v1-abstract-full').style.display = 'none'; document.getElementById('2405.17518v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.19425">arXiv:2403.19425</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.19425">pdf</a>, <a href="https://arxiv.org/ps/2403.19425">ps</a>, <a href="https://arxiv.org/format/2403.19425">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Robust Ensemble Algorithm for Ischemic Stroke Lesion Segmentation: Generalizability and Clinical Utility Beyond the ISLES Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=de+la+Rosa%2C+E">Ezequiel de la Rosa</a>, <a href="/search/eess?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/eess?searchtype=author&amp;query=Liew%2C+S">Sook-Lei Liew</a>, <a href="/search/eess?searchtype=author&amp;query=Hutton%2C+A">Alexandre Hutton</a>, <a href="/search/eess?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/eess?searchtype=author&amp;query=Kaesmacher%2C+J">Johannes Kaesmacher</a>, <a href="/search/eess?searchtype=author&amp;query=Hanning%2C+U">Uta Hanning</a>, <a href="/search/eess?searchtype=author&amp;query=Hakim%2C+A">Arsany Hakim</a>, <a href="/search/eess?searchtype=author&amp;query=Zubal%2C+R">Richard Zubal</a>, <a href="/search/eess?searchtype=author&amp;query=Valenzuela%2C+W">Waldo Valenzuela</a>, <a href="/search/eess?searchtype=author&amp;query=Robben%2C+D">David Robben</a>, <a href="/search/eess?searchtype=author&amp;query=Sima%2C+D+M">Diana M. Sima</a>, <a href="/search/eess?searchtype=author&amp;query=Anania%2C+V">Vincenzo Anania</a>, <a href="/search/eess?searchtype=author&amp;query=Brys%2C+A">Arne Brys</a>, <a href="/search/eess?searchtype=author&amp;query=Meakin%2C+J+A">James A. Meakin</a>, <a href="/search/eess?searchtype=author&amp;query=Mickan%2C+A">Anne Mickan</a>, <a href="/search/eess?searchtype=author&amp;query=Broocks%2C+G">Gabriel Broocks</a>, <a href="/search/eess?searchtype=author&amp;query=Heitkamp%2C+C">Christian Heitkamp</a>, <a href="/search/eess?searchtype=author&amp;query=Gao%2C+S">Shengbo Gao</a>, <a href="/search/eess?searchtype=author&amp;query=Liang%2C+K">Kongming Liang</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Z">Ziji Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Siddiquee%2C+M+M+R">Md Mahfuzur Rahman Siddiquee</a>, <a href="/search/eess?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/eess?searchtype=author&amp;query=Ashtari%2C+P">Pooya Ashtari</a>, <a href="/search/eess?searchtype=author&amp;query=Van+Huffel%2C+S">Sabine Van Huffel</a> , et al. (33 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.19425v2-abstract-short" style="display: inline;"> Diffusion-weighted MRI (DWI) is essential for stroke diagnosis, treatment decisions, and prognosis. However, image and disease variability hinder the development of generalizable AI algorithms with clinical value. We address this gap by presenting a novel ensemble algorithm derived from the 2022 Ischemic Stroke Lesion Segmentation (ISLES) challenge. ISLES&#39;22 provided 400 patient scans with ischemi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19425v2-abstract-full').style.display = 'inline'; document.getElementById('2403.19425v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.19425v2-abstract-full" style="display: none;"> Diffusion-weighted MRI (DWI) is essential for stroke diagnosis, treatment decisions, and prognosis. However, image and disease variability hinder the development of generalizable AI algorithms with clinical value. We address this gap by presenting a novel ensemble algorithm derived from the 2022 Ischemic Stroke Lesion Segmentation (ISLES) challenge. ISLES&#39;22 provided 400 patient scans with ischemic stroke from various medical centers, facilitating the development of a wide range of cutting-edge segmentation algorithms by the research community. Through collaboration with leading teams, we combined top-performing algorithms into an ensemble model that overcomes the limitations of individual solutions. Our ensemble model achieved superior ischemic lesion detection and segmentation accuracy on our internal test set compared to individual algorithms. This accuracy generalized well across diverse image and disease variables. Furthermore, the model excelled in extracting clinical biomarkers. Notably, in a Turing-like test, neuroradiologists consistently preferred the algorithm&#39;s segmentations over manual expert efforts, highlighting increased comprehensiveness and precision. Validation using a real-world external dataset (N=1686) confirmed the model&#39;s generalizability. The algorithm&#39;s outputs also demonstrated strong correlations with clinical scores (admission NIHSS and 90-day mRS) on par with or exceeding expert-derived results, underlining its clinical relevance. This study offers two key findings. First, we present an ensemble algorithm (https://github.com/Tabrisrei/ISLES22_Ensemble) that detects and segments ischemic stroke lesions on DWI across diverse scenarios on par with expert (neuro)radiologists. Second, we show the potential for biomedical challenge outputs to extend beyond the challenge&#39;s initial objectives, demonstrating their real-world clinical applicability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19425v2-abstract-full').style.display = 'none'; document.getElementById('2403.19425v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.09463">arXiv:2402.09463</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.09463">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TMI.2024.3485554">10.1109/TMI.2024.3485554 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Multi-Center Fetal Brain Tissue Annotation (FeTA) Challenge 2022 Results </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Payette%2C+K">Kelly Payette</a>, <a href="/search/eess?searchtype=author&amp;query=Steger%2C+C">C茅line Steger</a>, <a href="/search/eess?searchtype=author&amp;query=Licandro%2C+R">Roxane Licandro</a>, <a href="/search/eess?searchtype=author&amp;query=de+Dumast%2C+P">Priscille de Dumast</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+H+B">Hongwei Bran Li</a>, <a href="/search/eess?searchtype=author&amp;query=Barkovich%2C+M">Matthew Barkovich</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+L">Liu Li</a>, <a href="/search/eess?searchtype=author&amp;query=Dannecker%2C+M">Maik Dannecker</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+C">Chen Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Ouyang%2C+C">Cheng Ouyang</a>, <a href="/search/eess?searchtype=author&amp;query=McConnell%2C+N">Niccol貌 McConnell</a>, <a href="/search/eess?searchtype=author&amp;query=Miron%2C+A">Alina Miron</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Y">Yongmin Li</a>, <a href="/search/eess?searchtype=author&amp;query=Uus%2C+A">Alena Uus</a>, <a href="/search/eess?searchtype=author&amp;query=Grigorescu%2C+I">Irina Grigorescu</a>, <a href="/search/eess?searchtype=author&amp;query=Gilliland%2C+P+R">Paula Ramirez Gilliland</a>, <a href="/search/eess?searchtype=author&amp;query=Siddiquee%2C+M+M+R">Md Mahfuzur Rahman Siddiquee</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+H">Haoyu Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+Z">Ziyan Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Ye%2C+J">Jin Ye</a>, <a href="/search/eess?searchtype=author&amp;query=Aleny%C3%A0%2C+M">Mireia Aleny脿</a>, <a href="/search/eess?searchtype=author&amp;query=Comte%2C+V">Valentin Comte</a>, <a href="/search/eess?searchtype=author&amp;query=Camara%2C+O">Oscar Camara</a> , et al. (42 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.09463v1-abstract-short" style="display: inline;"> Segmentation is a critical step in analyzing the developing human fetal brain. There have been vast improvements in automatic segmentation methods in the past several years, and the Fetal Brain Tissue Annotation (FeTA) Challenge 2021 helped to establish an excellent standard of fetal brain segmentation. However, FeTA 2021 was a single center study, and the generalizability of algorithms across dif&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.09463v1-abstract-full').style.display = 'inline'; document.getElementById('2402.09463v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.09463v1-abstract-full" style="display: none;"> Segmentation is a critical step in analyzing the developing human fetal brain. There have been vast improvements in automatic segmentation methods in the past several years, and the Fetal Brain Tissue Annotation (FeTA) Challenge 2021 helped to establish an excellent standard of fetal brain segmentation. However, FeTA 2021 was a single center study, and the generalizability of algorithms across different imaging centers remains unsolved, limiting real-world clinical applicability. The multi-center FeTA Challenge 2022 focuses on advancing the generalizability of fetal brain segmentation algorithms for magnetic resonance imaging (MRI). In FeTA 2022, the training dataset contained images and corresponding manually annotated multi-class labels from two imaging centers, and the testing data contained images from these two imaging centers as well as two additional unseen centers. The data from different centers varied in many aspects, including scanners used, imaging parameters, and fetal brain super-resolution algorithms applied. 16 teams participated in the challenge, and 17 algorithms were evaluated. Here, a detailed overview and analysis of the challenge results are provided, focusing on the generalizability of the submissions. Both in- and out of domain, the white matter and ventricles were segmented with the highest accuracy, while the most challenging structure remains the cerebral cortex due to anatomical complexity. The FeTA Challenge 2022 was able to successfully evaluate and advance generalizability of multi-class fetal brain tissue segmentation algorithms for MRI and it continues to benchmark new algorithms. The resulting new methods contribute to improving the analysis of brain development in utero. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.09463v1-abstract-full').style.display = 'none'; document.getElementById('2402.09463v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Results from FeTA Challenge 2022, held at MICCAI; Manuscript submitted to IEEE Transactions on Medical Imaging (2024). Supplementary Info (including submission methods descriptions) available here: https://zenodo.org/records/10628648</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.13752">arXiv:2312.13752</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.13752">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.media.2024.103253">10.1016/j.media.2024.103253 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Hunting imaging biomarkers in pulmonary fibrosis: Benchmarks of the AIIB23 challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Nan%2C+Y">Yang Nan</a>, <a href="/search/eess?searchtype=author&amp;query=Xing%2C+X">Xiaodan Xing</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+S">Shiyi Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Tang%2C+Z">Zeyu Tang</a>, <a href="/search/eess?searchtype=author&amp;query=Felder%2C+F+N">Federico N Felder</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+S">Sheng Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Ledda%2C+R+E">Roberta Eufrasia Ledda</a>, <a href="/search/eess?searchtype=author&amp;query=Ding%2C+X">Xiaoliu Ding</a>, <a href="/search/eess?searchtype=author&amp;query=Yu%2C+R">Ruiqi Yu</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+W">Weiping Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Shi%2C+F">Feng Shi</a>, <a href="/search/eess?searchtype=author&amp;query=Sun%2C+T">Tianyang Sun</a>, <a href="/search/eess?searchtype=author&amp;query=Cao%2C+Z">Zehong Cao</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+M">Minghui Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Gu%2C+Y">Yun Gu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+H">Hanxiao Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Gao%2C+J">Jian Gao</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+P">Pingyu Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Tang%2C+W">Wen Tang</a>, <a href="/search/eess?searchtype=author&amp;query=Yu%2C+P">Pengxin Yu</a>, <a href="/search/eess?searchtype=author&amp;query=Kang%2C+H">Han Kang</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+J">Junqiang Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Lu%2C+X">Xing Lu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+B">Boyu Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Mamalakis%2C+M">Michail Mamalakis</a> , et al. (16 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.13752v2-abstract-short" style="display: inline;"> Airway-related quantitative imaging biomarkers are crucial for examination, diagnosis, and prognosis in pulmonary diseases. However, the manual delineation of airway trees remains prohibitively time-consuming. While significant efforts have been made towards enhancing airway modelling, current public-available datasets concentrate on lung diseases with moderate morphological variations. The intric&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.13752v2-abstract-full').style.display = 'inline'; document.getElementById('2312.13752v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.13752v2-abstract-full" style="display: none;"> Airway-related quantitative imaging biomarkers are crucial for examination, diagnosis, and prognosis in pulmonary diseases. However, the manual delineation of airway trees remains prohibitively time-consuming. While significant efforts have been made towards enhancing airway modelling, current public-available datasets concentrate on lung diseases with moderate morphological variations. The intricate honeycombing patterns present in the lung tissues of fibrotic lung disease patients exacerbate the challenges, often leading to various prediction errors. To address this issue, the &#39;Airway-Informed Quantitative CT Imaging Biomarker for Fibrotic Lung Disease 2023&#39; (AIIB23) competition was organized in conjunction with the official 2023 International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI). The airway structures were meticulously annotated by three experienced radiologists. Competitors were encouraged to develop automatic airway segmentation models with high robustness and generalization abilities, followed by exploring the most correlated QIB of mortality prediction. A training set of 120 high-resolution computerised tomography (HRCT) scans were publicly released with expert annotations and mortality status. The online validation set incorporated 52 HRCT scans from patients with fibrotic lung disease and the offline test set included 140 cases from fibrosis and COVID-19 patients. The results have shown that the capacity of extracting airway trees from patients with fibrotic lung disease could be enhanced by introducing voxel-wise weighted general union loss and continuity loss. In addition to the competitive image biomarkers for prognosis, a strong airway-derived biomarker (Hazard ratio&gt;1.5, p&lt;0.0001) was revealed for survival prognostication compared with existing clinical measurements, clinician assessment and AI-based biomarkers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.13752v2-abstract-full').style.display = 'none'; document.getElementById('2312.13752v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.05745">arXiv:2303.05745</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.05745">pdf</a>, <a href="https://arxiv.org/format/2303.05745">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Multi-site, Multi-domain Airway Tree Modeling (ATM&#39;22): A Public Benchmark for Pulmonary Airway Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+M">Minghui Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+Y">Yangqian Wu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+H">Hanxiao Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Qin%2C+Y">Yulei Qin</a>, <a href="/search/eess?searchtype=author&amp;query=Zheng%2C+H">Hao Zheng</a>, <a href="/search/eess?searchtype=author&amp;query=Tang%2C+W">Wen Tang</a>, <a href="/search/eess?searchtype=author&amp;query=Arnold%2C+C">Corey Arnold</a>, <a href="/search/eess?searchtype=author&amp;query=Pei%2C+C">Chenhao Pei</a>, <a href="/search/eess?searchtype=author&amp;query=Yu%2C+P">Pengxin Yu</a>, <a href="/search/eess?searchtype=author&amp;query=Nan%2C+Y">Yang Nan</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+G">Guang Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Walsh%2C+S">Simon Walsh</a>, <a href="/search/eess?searchtype=author&amp;query=Marshall%2C+D+C">Dominic C. Marshall</a>, <a href="/search/eess?searchtype=author&amp;query=Komorowski%2C+M">Matthieu Komorowski</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+P">Puyang Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Guo%2C+D">Dazhou Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+D">Dakai Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+Y">Ya&#39;nan Wu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+S">Shuiqing Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Chang%2C+R">Runsheng Chang</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+B">Boyu Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Lv%2C+X">Xing Lv</a>, <a href="/search/eess?searchtype=author&amp;query=Qayyum%2C+A">Abdul Qayyum</a>, <a href="/search/eess?searchtype=author&amp;query=Mazher%2C+M">Moona Mazher</a>, <a href="/search/eess?searchtype=author&amp;query=Su%2C+Q">Qi Su</a> , et al. (11 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.05745v3-abstract-short" style="display: inline;"> Open international challenges are becoming the de facto standard for assessing computer vision and image analysis algorithms. In recent years, new methods have extended the reach of pulmonary airway segmentation that is closer to the limit of image resolution. Since EXACT&#39;09 pulmonary airway segmentation, limited effort has been directed to quantitative comparison of newly emerged algorithms drive&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.05745v3-abstract-full').style.display = 'inline'; document.getElementById('2303.05745v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.05745v3-abstract-full" style="display: none;"> Open international challenges are becoming the de facto standard for assessing computer vision and image analysis algorithms. In recent years, new methods have extended the reach of pulmonary airway segmentation that is closer to the limit of image resolution. Since EXACT&#39;09 pulmonary airway segmentation, limited effort has been directed to quantitative comparison of newly emerged algorithms driven by the maturity of deep learning based approaches and clinical drive for resolving finer details of distal airways for early intervention of pulmonary diseases. Thus far, public annotated datasets are extremely limited, hindering the development of data-driven methods and detailed performance evaluation of new algorithms. To provide a benchmark for the medical imaging community, we organized the Multi-site, Multi-domain Airway Tree Modeling (ATM&#39;22), which was held as an official challenge event during the MICCAI 2022 conference. ATM&#39;22 provides large-scale CT scans with detailed pulmonary airway annotation, including 500 CT scans (300 for training, 50 for validation, and 150 for testing). The dataset was collected from different sites and it further included a portion of noisy COVID-19 CTs with ground-glass opacity and consolidation. Twenty-three teams participated in the entire phase of the challenge and the algorithms for the top ten teams are reviewed in this paper. Quantitative and qualitative results revealed that deep learning models embedded with the topological continuity enhancement achieved superior performance in general. ATM&#39;22 challenge holds as an open-call design, the training data and the gold standard evaluation are available upon successful registration via its homepage. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.05745v3-abstract-full').style.display = 'none'; document.getElementById('2303.05745v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">32 pages, 16 figures. Homepage: https://atm22.grand-challenge.org/. Submitted</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.03281">arXiv:2301.03281</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2301.03281">pdf</a>, <a href="https://arxiv.org/format/2301.03281">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> The state-of-the-art 3D anisotropic intracranial hemorrhage segmentation on non-contrast head CT: The INSTANCE challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Li%2C+X">Xiangyu Li</a>, <a href="/search/eess?searchtype=author&amp;query=Luo%2C+G">Gongning Luo</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+K">Kuanquan Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+H">Hongyu Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+J">Jun Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Liang%2C+X">Xinjie Liang</a>, <a href="/search/eess?searchtype=author&amp;query=Jiang%2C+J">Jie Jiang</a>, <a href="/search/eess?searchtype=author&amp;query=Song%2C+Z">Zhenghao Song</a>, <a href="/search/eess?searchtype=author&amp;query=Zheng%2C+C">Chunyue Zheng</a>, <a href="/search/eess?searchtype=author&amp;query=Chi%2C+H">Haokai Chi</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+M">Mingwang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=He%2C+Y">Yingte He</a>, <a href="/search/eess?searchtype=author&amp;query=Ma%2C+X">Xinghua Ma</a>, <a href="/search/eess?searchtype=author&amp;query=Guo%2C+J">Jingwen Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Y">Yifan Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+C">Chuanpu Li</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Z">Zeli Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Siddiquee%2C+M+M+R">Md Mahfuzur Rahman Siddiquee</a>, <a href="/search/eess?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/eess?searchtype=author&amp;query=Sanner%2C+A+P">Antoine P. Sanner</a>, <a href="/search/eess?searchtype=author&amp;query=Mukhopadhyay%2C+A">Anirban Mukhopadhyay</a>, <a href="/search/eess?searchtype=author&amp;query=Othman%2C+A+E">Ahmed E. Othman</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+X">Xingyu Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+W">Weiping Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+J">Jinhuang Zhang</a> , et al. (9 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.03281v2-abstract-short" style="display: inline;"> Automatic intracranial hemorrhage segmentation in 3D non-contrast head CT (NCCT) scans is significant in clinical practice. Existing hemorrhage segmentation methods usually ignores the anisotropic nature of the NCCT, and are evaluated on different in-house datasets with distinct metrics, making it highly challenging to improve segmentation performance and perform objective comparisons among differ&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.03281v2-abstract-full').style.display = 'inline'; document.getElementById('2301.03281v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.03281v2-abstract-full" style="display: none;"> Automatic intracranial hemorrhage segmentation in 3D non-contrast head CT (NCCT) scans is significant in clinical practice. Existing hemorrhage segmentation methods usually ignores the anisotropic nature of the NCCT, and are evaluated on different in-house datasets with distinct metrics, making it highly challenging to improve segmentation performance and perform objective comparisons among different methods. The INSTANCE 2022 was a grand challenge held in conjunction with the 2022 International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI). It is intended to resolve the above-mentioned problems and promote the development of both intracranial hemorrhage segmentation and anisotropic data processing. The INSTANCE released a training set of 100 cases with ground-truth and a validation set with 30 cases without ground-truth labels that were available to the participants. A held-out testing set with 70 cases is utilized for the final evaluation and ranking. The methods from different participants are ranked based on four metrics, including Dice Similarity Coefficient (DSC), Hausdorff Distance (HD), Relative Volume Difference (RVD) and Normalized Surface Dice (NSD). A total of 13 teams submitted distinct solutions to resolve the challenges, making several baseline models, pre-processing strategies and anisotropic data processing techniques available to future researchers. The winner method achieved an average DSC of 0.6925, demonstrating a significant growth over our proposed baseline method. To the best of our knowledge, the proposed INSTANCE challenge releases the first intracranial hemorrhage segmentation benchmark, and is also the first challenge that intended to resolve the anisotropic problem in 3D medical image segmentation, which provides new alternatives in these research fields. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.03281v2-abstract-full').style.display = 'none'; document.getElementById('2301.03281v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Summarized paper for the MICCAI INSTANCE 2022 Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.12512">arXiv:2206.12512</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.12512">pdf</a>, <a href="https://arxiv.org/format/2206.12512">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Placental Vessel Segmentation and Registration in Fetoscopy: Literature Review and MICCAI FetReg2021 Challenge Findings </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Bano%2C+S">Sophia Bano</a>, <a href="/search/eess?searchtype=author&amp;query=Casella%2C+A">Alessandro Casella</a>, <a href="/search/eess?searchtype=author&amp;query=Vasconcelos%2C+F">Francisco Vasconcelos</a>, <a href="/search/eess?searchtype=author&amp;query=Qayyum%2C+A">Abdul Qayyum</a>, <a href="/search/eess?searchtype=author&amp;query=Benzinou%2C+A">Abdesslam Benzinou</a>, <a href="/search/eess?searchtype=author&amp;query=Mazher%2C+M">Moona Mazher</a>, <a href="/search/eess?searchtype=author&amp;query=Meriaudeau%2C+F">Fabrice Meriaudeau</a>, <a href="/search/eess?searchtype=author&amp;query=Lena%2C+C">Chiara Lena</a>, <a href="/search/eess?searchtype=author&amp;query=Cintorrino%2C+I+A">Ilaria Anita Cintorrino</a>, <a href="/search/eess?searchtype=author&amp;query=De+Paolis%2C+G+R">Gaia Romana De Paolis</a>, <a href="/search/eess?searchtype=author&amp;query=Biagioli%2C+J">Jessica Biagioli</a>, <a href="/search/eess?searchtype=author&amp;query=Grechishnikova%2C+D">Daria Grechishnikova</a>, <a href="/search/eess?searchtype=author&amp;query=Jiao%2C+J">Jing Jiao</a>, <a href="/search/eess?searchtype=author&amp;query=Bai%2C+B">Bizhe Bai</a>, <a href="/search/eess?searchtype=author&amp;query=Qiao%2C+Y">Yanyan Qiao</a>, <a href="/search/eess?searchtype=author&amp;query=Bhattarai%2C+B">Binod Bhattarai</a>, <a href="/search/eess?searchtype=author&amp;query=Gaire%2C+R+R">Rebati Raman Gaire</a>, <a href="/search/eess?searchtype=author&amp;query=Subedi%2C+R">Ronast Subedi</a>, <a href="/search/eess?searchtype=author&amp;query=Vazquez%2C+E">Eduard Vazquez</a>, <a href="/search/eess?searchtype=author&amp;query=P%C5%82otka%2C+S">Szymon P艂otka</a>, <a href="/search/eess?searchtype=author&amp;query=Lisowska%2C+A">Aneta Lisowska</a>, <a href="/search/eess?searchtype=author&amp;query=Sitek%2C+A">Arkadiusz Sitek</a>, <a href="/search/eess?searchtype=author&amp;query=Attilakos%2C+G">George Attilakos</a>, <a href="/search/eess?searchtype=author&amp;query=Wimalasundera%2C+R">Ruwan Wimalasundera</a>, <a href="/search/eess?searchtype=author&amp;query=David%2C+A+L">Anna L David</a> , et al. (6 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.12512v3-abstract-short" style="display: inline;"> Fetoscopy laser photocoagulation is a widely adopted procedure for treating Twin-to-Twin Transfusion Syndrome (TTTS). The procedure involves photocoagulation pathological anastomoses to regulate blood exchange among twins. The procedure is particularly challenging due to the limited field of view, poor manoeuvrability of the fetoscope, poor visibility, and variability in illumination. These challe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.12512v3-abstract-full').style.display = 'inline'; document.getElementById('2206.12512v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.12512v3-abstract-full" style="display: none;"> Fetoscopy laser photocoagulation is a widely adopted procedure for treating Twin-to-Twin Transfusion Syndrome (TTTS). The procedure involves photocoagulation pathological anastomoses to regulate blood exchange among twins. The procedure is particularly challenging due to the limited field of view, poor manoeuvrability of the fetoscope, poor visibility, and variability in illumination. These challenges may lead to increased surgery time and incomplete ablation. Computer-assisted intervention (CAI) can provide surgeons with decision support and context awareness by identifying key structures in the scene and expanding the fetoscopic field of view through video mosaicking. Research in this domain has been hampered by the lack of high-quality data to design, develop and test CAI algorithms. Through the Fetoscopic Placental Vessel Segmentation and Registration (FetReg2021) challenge, which was organized as part of the MICCAI2021 Endoscopic Vision challenge, we released the first largescale multicentre TTTS dataset for the development of generalized and robust semantic segmentation and video mosaicking algorithms. For this challenge, we released a dataset of 2060 images, pixel-annotated for vessels, tool, fetus and background classes, from 18 in-vivo TTTS fetoscopy procedures and 18 short video clips. Seven teams participated in this challenge and their model performance was assessed on an unseen test dataset of 658 pixel-annotated images from 6 fetoscopic procedures and 6 short clips. The challenge provided an opportunity for creating generalized solutions for fetoscopic scene understanding and mosaicking. In this paper, we present the findings of the FetReg2021 challenge alongside reporting a detailed literature review for CAI in TTTS fetoscopy. Through this challenge, its analysis and the release of multi-centre fetoscopic data, we provide a benchmark for future research in this field. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.12512v3-abstract-full').style.display = 'none'; document.getElementById('2206.12512v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at MedIA (Medical Image Analysis)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.09573">arXiv:2204.09573</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2204.09573">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.media.2023.102833">10.1016/j.media.2023.102833 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fetal Brain Tissue Annotation and Segmentation Challenge Results </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Payette%2C+K">Kelly Payette</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+H">Hongwei Li</a>, <a href="/search/eess?searchtype=author&amp;query=de+Dumast%2C+P">Priscille de Dumast</a>, <a href="/search/eess?searchtype=author&amp;query=Licandro%2C+R">Roxane Licandro</a>, <a href="/search/eess?searchtype=author&amp;query=Ji%2C+H">Hui Ji</a>, <a href="/search/eess?searchtype=author&amp;query=Siddiquee%2C+M+M+R">Md Mahfuzur Rahman Siddiquee</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+H">Hao Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Pei%2C+Y">Yuchen Pei</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+L">Lisheng Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+Y">Ying Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Xie%2C+J">Juanying Xie</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+H">Huiquan Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Dong%2C+G">Guiming Dong</a>, <a href="/search/eess?searchtype=author&amp;query=Fu%2C+H">Hao Fu</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+G">Guotai Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Rieu%2C+Z">ZunHyan Rieu</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+D">Donghyeon Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+H+G">Hyun Gi Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Karimi%2C+D">Davood Karimi</a>, <a href="/search/eess?searchtype=author&amp;query=Gholipour%2C+A">Ali Gholipour</a>, <a href="/search/eess?searchtype=author&amp;query=Torres%2C+H+R">Helena R. Torres</a>, <a href="/search/eess?searchtype=author&amp;query=Oliveira%2C+B">Bruno Oliveira</a>, <a href="/search/eess?searchtype=author&amp;query=Vila%C3%A7a%2C+J+L">Jo茫o L. Vila莽a</a> , et al. (33 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.09573v1-abstract-short" style="display: inline;"> In-utero fetal MRI is emerging as an important tool in the diagnosis and analysis of the developing human brain. Automatic segmentation of the developing fetal brain is a vital step in the quantitative analysis of prenatal neurodevelopment both in the research and clinical context. However, manual segmentation of cerebral structures is time-consuming and prone to error and inter-observer variabili&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.09573v1-abstract-full').style.display = 'inline'; document.getElementById('2204.09573v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.09573v1-abstract-full" style="display: none;"> In-utero fetal MRI is emerging as an important tool in the diagnosis and analysis of the developing human brain. Automatic segmentation of the developing fetal brain is a vital step in the quantitative analysis of prenatal neurodevelopment both in the research and clinical context. However, manual segmentation of cerebral structures is time-consuming and prone to error and inter-observer variability. Therefore, we organized the Fetal Tissue Annotation (FeTA) Challenge in 2021 in order to encourage the development of automatic segmentation algorithms on an international level. The challenge utilized FeTA Dataset, an open dataset of fetal brain MRI reconstructions segmented into seven different tissues (external cerebrospinal fluid, grey matter, white matter, ventricles, cerebellum, brainstem, deep grey matter). 20 international teams participated in this challenge, submitting a total of 21 algorithms for evaluation. In this paper, we provide a detailed analysis of the results from both a technical and clinical perspective. All participants relied on deep learning methods, mainly U-Nets, with some variability present in the network architecture, optimization, and image pre- and post-processing. The majority of teams used existing medical imaging deep learning frameworks. The main differences between the submissions were the fine tuning done during training, and the specific pre- and post-processing steps performed. The challenge results showed that almost all submissions performed similarly. Four of the top five teams used ensemble learning methods. However, one team&#39;s algorithm performed significantly superior to the other submissions, and consisted of an asymmetrical U-Net network architecture. This paper provides a first of its kind benchmark for future automatic multi-tissue segmentation algorithms for the developing human brain in utero. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.09573v1-abstract-full').style.display = 'none'; document.getElementById('2204.09573v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Results from FeTA Challenge 2021, held at MICCAI; Manuscript submitted</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10