CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;20 of 20 results for author: <span class="mathjax">Matthew, J</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Matthew%2C+J">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Matthew, J"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Matthew%2C+J&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Matthew, J"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.01201">arXiv:2401.01201</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.01201">pdf</a>, <a href="https://arxiv.org/format/2401.01201">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41746-024-01406-z">10.1038/s41746-024-01406-z <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Whole-examination AI estimation of fetal biometrics from 20-week ultrasound scans </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Venturini%2C+L">Lorenzo Venturini</a>, <a href="/search/cs?searchtype=author&amp;query=Budd%2C+S">Samuel Budd</a>, <a href="/search/cs?searchtype=author&amp;query=Farruggia%2C+A">Alfonso Farruggia</a>, <a href="/search/cs?searchtype=author&amp;query=Wright%2C+R">Robert Wright</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Day%2C+T+G">Thomas G. Day</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=Hajnal%2C+J+V">Jo V. Hajnal</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.01201v1-abstract-short" style="display: inline;"> The current approach to fetal anomaly screening is based on biometric measurements derived from individually selected ultrasound images. In this paper, we introduce a paradigm shift that attains human-level performance in biometric measurement by aggregating automatically extracted biometrics from every frame across an entire scan, with no need for operator intervention. We use a convolutional neu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.01201v1-abstract-full').style.display = 'inline'; document.getElementById('2401.01201v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.01201v1-abstract-full" style="display: none;"> The current approach to fetal anomaly screening is based on biometric measurements derived from individually selected ultrasound images. In this paper, we introduce a paradigm shift that attains human-level performance in biometric measurement by aggregating automatically extracted biometrics from every frame across an entire scan, with no need for operator intervention. We use a convolutional neural network to classify each frame of an ultrasound video recording. We then measure fetal biometrics in every frame where appropriate anatomy is visible. We use a Bayesian method to estimate the true value of each biometric from a large number of measurements and probabilistically reject outliers. We performed a retrospective experiment on 1457 recordings (comprising 48 million frames) of 20-week ultrasound scans, estimated fetal biometrics in those scans and compared our estimates to the measurements sonographers took during the scan. Our method achieves human-level performance in estimating fetal biometrics and estimates well-calibrated credible intervals in which the true biometric value is expected to lie. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.01201v1-abstract-full').style.display = 'none'; document.getElementById('2401.01201v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 16 figures. Submitted to NPJ digital medicine. For associated video file, see http://wp.doc.ic.ac.uk/ifind/wp-content/uploads/sites/79/2023/12/realtime.gif</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.4.7; J.3 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.03941">arXiv:2304.03941</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2304.03941">pdf</a>, <a href="https://arxiv.org/format/2304.03941">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Towards Realistic Ultrasound Fetal Brain Imaging Synthesis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iskandar%2C+M">Michelle Iskandar</a>, <a href="/search/cs?searchtype=author&amp;query=Mannering%2C+H">Harvey Mannering</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Z">Zhanxiang Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Kerdegari%2C+H">Hamideh Kerdegari</a>, <a href="/search/cs?searchtype=author&amp;query=Peralta%2C+L">Laura Peralta</a>, <a href="/search/cs?searchtype=author&amp;query=Xochicale%2C+M">Miguel Xochicale</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.03941v1-abstract-short" style="display: inline;"> Prenatal ultrasound imaging is the first-choice modality to assess fetal health. Medical image datasets for AI and ML methods must be diverse (i.e. diagnoses, diseases, pathologies, scanners, demographics, etc), however there are few public ultrasound fetal imaging datasets due to insufficient amounts of clinical data, patient privacy, rare occurrence of abnormalities in general practice, and limi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.03941v1-abstract-full').style.display = 'inline'; document.getElementById('2304.03941v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.03941v1-abstract-full" style="display: none;"> Prenatal ultrasound imaging is the first-choice modality to assess fetal health. Medical image datasets for AI and ML methods must be diverse (i.e. diagnoses, diseases, pathologies, scanners, demographics, etc), however there are few public ultrasound fetal imaging datasets due to insufficient amounts of clinical data, patient privacy, rare occurrence of abnormalities in general practice, and limited experts for data collection and validation. To address such data scarcity, we proposed generative adversarial networks (GAN)-based models, diffusion-super-resolution-GAN and transformer-based-GAN, to synthesise images of fetal ultrasound brain planes from one public dataset. We reported that GAN-based methods can generate 256x256 pixel size of fetal ultrasound trans-cerebellum brain image plane with stable training losses, resulting in lower FID values for diffusion-super-resolution-GAN (average 7.04 and lower FID 5.09 at epoch 10) than the FID values of transformer-based-GAN (average 36.02 and lower 28.93 at epoch 60). The results of this work illustrate the potential of GAN-based methods to synthesise realistic high-resolution ultrasound images, leading to future work with other fetal brain planes, anatomies, devices and the need of a pool of experts to evaluate synthesised images. Code, data and other resources to reproduce this work are available at \url{https://github.com/budai4medtech/midl2023}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.03941v1-abstract-full').style.display = 'none'; document.getElementById('2304.03941v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">3 pages, 1 figure</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.14746">arXiv:2206.14746</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.14746">pdf</a>, <a href="https://arxiv.org/format/2206.14746">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Placenta Segmentation in Ultrasound Imaging: Addressing Sources of Uncertainty and Limited Field-of-View </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zimmer%2C+V+A">Veronika A. Zimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Skelton%2C+E">Emily Skelton</a>, <a href="/search/cs?searchtype=author&amp;query=Wright%2C+R">Robert Wright</a>, <a href="/search/cs?searchtype=author&amp;query=Wheeler%2C+G">Gavin Wheeler</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+S">Shujie Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Ghavami%2C+N">Nooshin Ghavami</a>, <a href="/search/cs?searchtype=author&amp;query=Lloyd%2C+K">Karen Lloyd</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Hajnal%2C+J+V">Joseph V. Hajnal</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.14746v1-abstract-short" style="display: inline;"> Automatic segmentation of the placenta in fetal ultrasound (US) is challenging due to the (i) high diversity of placenta appearance, (ii) the restricted quality in US resulting in highly variable reference annotations, and (iii) the limited field-of-view of US prohibiting whole placenta assessment at late gestation. In this work, we address these three challenges with a multi-task learning approac&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.14746v1-abstract-full').style.display = 'inline'; document.getElementById('2206.14746v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.14746v1-abstract-full" style="display: none;"> Automatic segmentation of the placenta in fetal ultrasound (US) is challenging due to the (i) high diversity of placenta appearance, (ii) the restricted quality in US resulting in highly variable reference annotations, and (iii) the limited field-of-view of US prohibiting whole placenta assessment at late gestation. In this work, we address these three challenges with a multi-task learning approach that combines the classification of placental location (e.g., anterior, posterior) and semantic placenta segmentation in a single convolutional neural network. Through the classification task the model can learn from larger and more diverse datasets while improving the accuracy of the segmentation task in particular in limited training set conditions. With this approach we investigate the variability in annotations from multiple raters and show that our automatic segmentations (Dice of 0.86 for anterior and 0.83 for posterior placentas) achieve human-level performance as compared to intra- and inter-observer variability. Lastly, our approach can deliver whole placenta segmentation using a multi-view US acquisition pipeline consisting of three stages: multi-probe image acquisition, image fusion and image segmentation. This results in high quality segmentation of larger structures such as the placenta in US with reduced image artifacts which are beyond the field-of-view of single probes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.14746v1-abstract-full').style.display = 'none'; document.getElementById('2206.14746v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages (18 + appendix), 13 figures (9 + appendix)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.01731">arXiv:2206.01731</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.01731">pdf</a>, <a href="https://arxiv.org/format/2206.01731">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Empirical Study of Quality Image Assessment for Synthesis of Fetal Head Ultrasound Imaging with DCGANs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bautista%2C+T">Thea Bautista</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Kerdegari%2C+H">Hamideh Kerdegari</a>, <a href="/search/cs?searchtype=author&amp;query=Pereira%2C+L+P">Laura Peralta Pereira</a>, <a href="/search/cs?searchtype=author&amp;query=Xochicale%2C+M">Miguel Xochicale</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.01731v2-abstract-short" style="display: inline;"> In this work, we present an empirical study of DCGANs, including hyperparameter heuristics and image quality assessment, as a way to address the scarcity of datasets to investigate fetal head ultrasound. We present experiments to show the impact of different image resolutions, epochs, dataset size input, and learning rates for quality image assessment on four metrics: mutual information (MI), Fr茅c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.01731v2-abstract-full').style.display = 'inline'; document.getElementById('2206.01731v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.01731v2-abstract-full" style="display: none;"> In this work, we present an empirical study of DCGANs, including hyperparameter heuristics and image quality assessment, as a way to address the scarcity of datasets to investigate fetal head ultrasound. We present experiments to show the impact of different image resolutions, epochs, dataset size input, and learning rates for quality image assessment on four metrics: mutual information (MI), Fr茅chet inception distance (FID), peak-signal-to-noise ratio (PSNR), and local binary pattern vector (LBPv). The results show that FID and LBPv have stronger relationship with clinical image quality scores. The resources to reproduce this work are available at \url{https://github.com/budai4medtech/miua2022}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.01731v2-abstract-full').style.display = 'none'; document.getElementById('2206.01731v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.06519">arXiv:2109.06519</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.06519">pdf</a>, <a href="https://arxiv.org/format/2109.06519">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> PRETUS: A plug-in based platform for real-time ultrasound imaging research </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Zimmer%2C+V+A">Veronika A. Zimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Wheeler%2C+G">Gavin Wheeler</a>, <a href="/search/cs?searchtype=author&amp;query=Toussaint%2C+N">Nicolas Toussaint</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+S">Shujie Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Wright%2C+R">Robert Wright</a>, <a href="/search/cs?searchtype=author&amp;query=Skelton%2C+E">Emily Skelton</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jackie Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/cs?searchtype=author&amp;query=Hajnal%2C+J">Jo Hajnal</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J">Julia Schnabel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.06519v1-abstract-short" style="display: inline;"> We present PRETUS -a Plugin-based Real Time UltraSound software platform for live ultrasound image analysis and operator support. The software is lightweight; functionality is brought in via independent plug-ins that can be arranged in sequence. The software allows to capture the real-time stream of ultrasound images from virtually any ultrasound machine, applies computational methods and visualis&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.06519v1-abstract-full').style.display = 'inline'; document.getElementById('2109.06519v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.06519v1-abstract-full" style="display: none;"> We present PRETUS -a Plugin-based Real Time UltraSound software platform for live ultrasound image analysis and operator support. The software is lightweight; functionality is brought in via independent plug-ins that can be arranged in sequence. The software allows to capture the real-time stream of ultrasound images from virtually any ultrasound machine, applies computational methods and visualises the results on-the-fly. Plug-ins can run concurrently without blocking each other. They can be implemented in C ++ and Python. A graphical user interface can be implemented for each plug-in, and presented to the user in a compact way. The software is free and open source, and allows for rapid prototyping and testing of real-time ultrasound imaging methods in a manufacturer-agnostic fashion. The software is provided with input, output and processing plug-ins, as well as with tutorials to illustrate how to develop new plug-ins for PRETUS. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.06519v1-abstract-full').style.display = 'none'; document.getElementById('2109.06519v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 65-04 (Primary); 92C55 (Secondary) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.14682">arXiv:2107.14682</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.14682">pdf</a>, <a href="https://arxiv.org/format/2107.14682">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Can non-specialists provide high quality gold standard labels in challenging modalities? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Budd%2C+S">Samuel Budd</a>, <a href="/search/cs?searchtype=author&amp;query=Day%2C+T">Thomas Day</a>, <a href="/search/cs?searchtype=author&amp;query=Simpson%2C+J">John Simpson</a>, <a href="/search/cs?searchtype=author&amp;query=Lloyd%2C+K">Karen Lloyd</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Skelton%2C+E">Emily Skelton</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.14682v1-abstract-short" style="display: inline;"> Probably yes. -- Supervised Deep Learning dominates performance scores for many computer vision tasks and defines the state-of-the-art. However, medical image analysis lags behind natural image applications. One of the many reasons is the lack of well annotated medical image data available to researchers. One of the first things researchers are told is that we require significant expertise to reli&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.14682v1-abstract-full').style.display = 'inline'; document.getElementById('2107.14682v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.14682v1-abstract-full" style="display: none;"> Probably yes. -- Supervised Deep Learning dominates performance scores for many computer vision tasks and defines the state-of-the-art. However, medical image analysis lags behind natural image applications. One of the many reasons is the lack of well annotated medical image data available to researchers. One of the first things researchers are told is that we require significant expertise to reliably and accurately interpret and label such data. We see significant inter- and intra-observer variability between expert annotations of medical images. Still, it is a widely held assumption that novice annotators are unable to provide useful annotations for use by clinical Deep Learning models. In this work we challenge this assumption and examine the implications of using a minimally trained novice labelling workforce to acquire annotations for a complex medical image dataset. We study the time and cost implications of using novice annotators, the raw performance of novice annotators compared to gold-standard expert annotators, and the downstream effects on a trained Deep Learning segmentation model&#39;s performance for detecting a specific congenital heart disease (hypoplastic left heart syndrome) in fetal ultrasound imaging. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.14682v1-abstract-full').style.display = 'none'; document.getElementById('2107.14682v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at the FAIR workshop in conjunction with MICCAI&#39;21</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.02643">arXiv:2107.02643</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.02643">pdf</a>, <a href="https://arxiv.org/format/2107.02643">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Detecting Hypo-plastic Left Heart Syndrome in Fetal Ultrasound via Disease-specific Atlas Maps </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Budd%2C+S">Samuel Budd</a>, <a href="/search/cs?searchtype=author&amp;query=Sinclair%2C+M">Matthew Sinclair</a>, <a href="/search/cs?searchtype=author&amp;query=Day%2C+T">Thomas Day</a>, <a href="/search/cs?searchtype=author&amp;query=Vlontzos%2C+A">Athanasios Vlontzos</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+J">Jeremy Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+T">Tianrui Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jaqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Skelton%2C+E">Emily Skelton</a>, <a href="/search/cs?searchtype=author&amp;query=Simpson%2C+J">John Simpson</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=Glocker%2C+B">Ben Glocker</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Robinson%2C+E+C">Emma C. Robinson</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.02643v1-abstract-short" style="display: inline;"> Fetal ultrasound screening during pregnancy plays a vital role in the early detection of fetal malformations which have potential long-term health impacts. The level of skill required to diagnose such malformations from live ultrasound during examination is high and resources for screening are often limited. We present an interpretable, atlas-learning segmentation method for automatic diagnosis of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.02643v1-abstract-full').style.display = 'inline'; document.getElementById('2107.02643v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.02643v1-abstract-full" style="display: none;"> Fetal ultrasound screening during pregnancy plays a vital role in the early detection of fetal malformations which have potential long-term health impacts. The level of skill required to diagnose such malformations from live ultrasound during examination is high and resources for screening are often limited. We present an interpretable, atlas-learning segmentation method for automatic diagnosis of Hypo-plastic Left Heart Syndrome (HLHS) from a single `4 Chamber Heart&#39; view image. We propose to extend the recently introduced Image-and-Spatial Transformer Networks (Atlas-ISTN) into a framework that enables sensitising atlas generation to disease. In this framework we can jointly learn image segmentation, registration, atlas construction and disease prediction while providing a maximum level of clinical interpretability compared to direct image classification methods. As a result our segmentation allows diagnoses competitive with expert-derived manual diagnosis and yields an AUC-ROC of 0.978 (1043 cases for training, 260 for validation and 325 for testing). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.02643v1-abstract-full').style.display = 'none'; document.getElementById('2107.02643v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">MICCAI&#39;21 Main Conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.03679">arXiv:2012.03679</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.03679">pdf</a>, <a href="https://arxiv.org/format/2012.03679">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Learning normal appearance for fetal anomaly screening: Application to the unsupervised detection of Hypoplastic Left Heart Syndrome </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chotzoglou%2C+E">Elisa Chotzoglou</a>, <a href="/search/cs?searchtype=author&amp;query=Day%2C+T">Thomas Day</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+J">Jeremy Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Lloyd%2C+D">David Lloyd</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=Simpson%2C+J">John Simpson</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.03679v2-abstract-short" style="display: inline;"> Congenital heart disease is considered as one the most common groups of congenital malformations which affects $6-11$ per $1000$ newborns. In this work, an automated framework for detection of cardiac anomalies during ultrasound screening is proposed and evaluated on the example of Hypoplastic Left Heart Syndrome (HLHS), a sub-category of congenital heart disease. We propose an unsupervised approa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.03679v2-abstract-full').style.display = 'inline'; document.getElementById('2012.03679v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.03679v2-abstract-full" style="display: none;"> Congenital heart disease is considered as one the most common groups of congenital malformations which affects $6-11$ per $1000$ newborns. In this work, an automated framework for detection of cardiac anomalies during ultrasound screening is proposed and evaluated on the example of Hypoplastic Left Heart Syndrome (HLHS), a sub-category of congenital heart disease. We propose an unsupervised approach that learns healthy anatomy exclusively from clinically confirmed normal control patients. We evaluate a number of known anomaly detection frameworks together with a model architecture based on the $伪$-GAN network and find evidence that the proposed model performs significantly better than the state-of-the-art in image-based anomaly detection, yielding average $0.81$ AUC \emph{and} a better robustness towards initialisation compared to previous works. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.03679v2-abstract-full').style.display = 'none'; document.getElementById('2012.03679v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.00739">arXiv:2011.00739</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2011.00739">pdf</a>, <a href="https://arxiv.org/format/2011.00739">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Mutual Information-based Disentangled Neural Networks for Classifying Unseen Categories in Different Domains: Application to Fetal Ultrasound Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Meng%2C+Q">Qingjie Meng</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Zimmer%2C+V+A">Veronika A. Zimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Lloyd%2C+D+F+A">David F. A. Lloyd</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.00739v2-abstract-short" style="display: inline;"> Deep neural networks exhibit limited generalizability across images with different entangled domain features and categorical features. Learning generalizable features that can form universal categorical decision boundaries across domains is an interesting and difficult challenge. This problem occurs frequently in medical imaging applications when attempts are made to deploy and improve deep learni&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.00739v2-abstract-full').style.display = 'inline'; document.getElementById('2011.00739v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.00739v2-abstract-full" style="display: none;"> Deep neural networks exhibit limited generalizability across images with different entangled domain features and categorical features. Learning generalizable features that can form universal categorical decision boundaries across domains is an interesting and difficult challenge. This problem occurs frequently in medical imaging applications when attempts are made to deploy and improve deep learning models across different image acquisition devices, across acquisition parameters or if some classes are unavailable in new training databases. To address this problem, we propose Mutual Information-based Disentangled Neural Networks (MIDNet), which extract generalizable categorical features to transfer knowledge to unseen categories in a target domain. The proposed MIDNet adopts a semi-supervised learning paradigm to alleviate the dependency on labeled data. This is important for real-world applications where data annotation is time-consuming, costly and requires training and expertise. We extensively evaluate the proposed method on fetal ultrasound datasets for two different image classification tasks where domain features are respectively defined by shadow artifacts and image acquisition devices. Experimental results show that the proposed method outperforms the state-of-the-art on the classification of unseen categories in a target domain with sparsely labeled training data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.00739v2-abstract-full').style.display = 'none'; document.getElementById('2011.00739v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2003.00321</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2007.06272">arXiv:2007.06272</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2007.06272">pdf</a>, <a href="https://arxiv.org/format/2007.06272">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Screen Tracking for Clinical Translation of Live Ultrasound Image Analysis Methods </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Treivase%2C+S">Simona Treivase</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Skelton%2C+E">Emily Skelton</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a>, <a href="/search/cs?searchtype=author&amp;query=Toussaint%2C+N">Nicolas Toussaint</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2007.06272v1-abstract-short" style="display: inline;"> Ultrasound (US) imaging is one of the most commonly used non-invasive imaging techniques. However, US image acquisition requires simultaneous guidance of the transducer and interpretation of images, which is a highly challenging task that requires years of training. Despite many recent developments in intra-examination US image analysis, the results are not easy to translate to a clinical setting.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.06272v1-abstract-full').style.display = 'inline'; document.getElementById('2007.06272v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2007.06272v1-abstract-full" style="display: none;"> Ultrasound (US) imaging is one of the most commonly used non-invasive imaging techniques. However, US image acquisition requires simultaneous guidance of the transducer and interpretation of images, which is a highly challenging task that requires years of training. Despite many recent developments in intra-examination US image analysis, the results are not easy to translate to a clinical setting. We propose a generic framework to extract the US images and superimpose the results of an analysis task, without any need for physical connection or alteration to the US system. The proposed method captures the US image by tracking the screen with a camera fixed at the sonographer&#39;s view point and reformats the captured image to the right aspect ratio, in 87.66 +- 3.73ms on average. It is hypothesized that this would enable to input such retrieved image into an image processing pipeline to extract information that can help improve the examination. This information could eventually be projected back to the sonographer&#39;s field of view in real time using, for example, an augmented reality (AR) headset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.06272v1-abstract-full').style.display = 'none'; document.getElementById('2007.06272v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T45 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1908.02582">arXiv:1908.02582</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1908.02582">pdf</a>, <a href="https://arxiv.org/format/1908.02582">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Confident Head Circumference Measurement from Ultrasound with Real-time Feedback for Sonographers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Budd%2C+S">Samuel Budd</a>, <a href="/search/cs?searchtype=author&amp;query=Sinclair%2C+M">Matthew Sinclair</a>, <a href="/search/cs?searchtype=author&amp;query=Khanal%2C+B">Bishesh Khanal</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Lloyd%2C+D">David Lloyd</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Toussaint%2C+N">Nicolas Toussaint</a>, <a href="/search/cs?searchtype=author&amp;query=Robinson%2C+E">Emma Robinson</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1908.02582v1-abstract-short" style="display: inline;"> Manual estimation of fetal Head Circumference (HC) from Ultrasound (US) is a key biometric for monitoring the healthy development of fetuses. Unfortunately, such measurements are subject to large inter-observer variability, resulting in low early-detection rates of fetal abnormalities. To address this issue, we propose a novel probabilistic Deep Learning approach for real-time automated estimation&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.02582v1-abstract-full').style.display = 'inline'; document.getElementById('1908.02582v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1908.02582v1-abstract-full" style="display: none;"> Manual estimation of fetal Head Circumference (HC) from Ultrasound (US) is a key biometric for monitoring the healthy development of fetuses. Unfortunately, such measurements are subject to large inter-observer variability, resulting in low early-detection rates of fetal abnormalities. To address this issue, we propose a novel probabilistic Deep Learning approach for real-time automated estimation of fetal HC. This system feeds back statistics on measurement robustness to inform users how confident a deep neural network is in evaluating suitable views acquired during free-hand ultrasound examination. In real-time scenarios, this approach may be exploited to guide operators to scan planes that are as close as possible to the underlying distribution of training images, for the purpose of improving inter-operator consistency. We train on free-hand ultrasound data from over 2000 subjects (2848 training/540 test) and show that our method is able to predict HC measurements within 1.81$\pm$1.65mm deviation from the ground truth, with 50% of the test images fully contained within the predicted confidence margins, and an average of 1.82$\pm$1.78mm deviation from the margin for the remaining cases that are not fully contained. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.02582v1-abstract-full').style.display = 'none'; document.getElementById('1908.02582v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at MICCAI 2019; Demo video available on Twitter (@sambuddinc)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1902.05458">arXiv:1902.05458</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1902.05458">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Robotic-assisted Ultrasound for Fetal Imaging: Evolution from Single-arm to Dual-arm System </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shuangyi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Housden%2C+J">James Housden</a>, <a href="/search/cs?searchtype=author&amp;query=Noh%2C+Y">Yohan Noh</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+D">Davinder Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+A">Anisha Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Skelton%2C+E">Emily Skelton</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+C">Cornelius Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Back%2C+J">Junghwan Back</a>, <a href="/search/cs?searchtype=author&amp;query=Lindenroth%2C+L">Lukas Lindenroth</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Toussaint%2C+N">Nicolas Toussaint</a>, <a href="/search/cs?searchtype=author&amp;query=Zimmer%2C+V">Veronika Zimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Knight%2C+C">Caroline Knight</a>, <a href="/search/cs?searchtype=author&amp;query=Fletcher%2C+T">Tara Fletcher</a>, <a href="/search/cs?searchtype=author&amp;query=Lloyd%2C+D">David Lloyd</a>, <a href="/search/cs?searchtype=author&amp;query=Simpson%2C+J">John Simpson</a>, <a href="/search/cs?searchtype=author&amp;query=Pasupathy%2C+D">Dharmintra Pasupathy</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Hongbin Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Althoefer%2C+K">Kaspar Althoefer</a>, <a href="/search/cs?searchtype=author&amp;query=Hajnal%2C+J">Joseph Hajnal</a>, <a href="/search/cs?searchtype=author&amp;query=Razavi%2C+R">Reza Razavi</a>, <a href="/search/cs?searchtype=author&amp;query=Rhode%2C+K">Kawal Rhode</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1902.05458v3-abstract-short" style="display: inline;"> The development of robotic-assisted extracorporeal ultrasound systems has a long history and a number of projects have been proposed since the 1990s focusing on different technical aspects. These aim to resolve the deficiencies of on-site manual manipulation of hand-held ultrasound probes. This paper presents the recent ongoing developments of a series of bespoke robotic systems, including both si&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.05458v3-abstract-full').style.display = 'inline'; document.getElementById('1902.05458v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1902.05458v3-abstract-full" style="display: none;"> The development of robotic-assisted extracorporeal ultrasound systems has a long history and a number of projects have been proposed since the 1990s focusing on different technical aspects. These aim to resolve the deficiencies of on-site manual manipulation of hand-held ultrasound probes. This paper presents the recent ongoing developments of a series of bespoke robotic systems, including both single-arm and dual-arm versions, for a project known as intelligent Fetal Imaging and Diagnosis (iFIND). After a brief review of the development history of the extracorporeal ultrasound robotic system used for fetal and abdominal examinations, the specific aim of the iFIND robots, the design evolution, the implementation details of each version, and the initial clinical feedback of the iFIND robot series are presented. Based on the preliminary testing of these newly-proposed robots on 42 volunteers, the successful and re-liable working of the mechatronic systems were validated. Analysis of a participant questionnaire indicates a comfortable scanning experience for the volunteers and a good acceptance rate to being scanned by the robots. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.05458v3-abstract-full').style.display = 'none'; document.getElementById('1902.05458v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 February, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 6 figures, TAROS conference 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.08164">arXiv:1811.08164</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1811.08164">pdf</a>, <a href="https://arxiv.org/format/1811.08164">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Weakly Supervised Estimation of Shadow Confidence Maps in Fetal Ultrasound Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Meng%2C+Q">Qingjie Meng</a>, <a href="/search/cs?searchtype=author&amp;query=Sinclair%2C+M">Matthew Sinclair</a>, <a href="/search/cs?searchtype=author&amp;query=Zimmer%2C+V">Veronika Zimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+B">Benjamin Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Rajchl%2C+M">Martin Rajchl</a>, <a href="/search/cs?searchtype=author&amp;query=Toussaint%2C+N">Nicolas Toussaint</a>, <a href="/search/cs?searchtype=author&amp;query=Oktay%2C+O">Ozan Oktay</a>, <a href="/search/cs?searchtype=author&amp;query=Schlemper%2C+J">Jo Schlemper</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Housden%2C+J">James Housden</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J">Julia Schnabel</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.08164v3-abstract-short" style="display: inline;"> Detecting acoustic shadows in ultrasound images is important in many clinical and engineering applications. Real-time feedback of acoustic shadows can guide sonographers to a standardized diagnostic viewing plane with minimal artifacts and can provide additional information for other automatic image analysis algorithms. However, automatically detecting shadow regions using learning-based algorithm&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08164v3-abstract-full').style.display = 'inline'; document.getElementById('1811.08164v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.08164v3-abstract-full" style="display: none;"> Detecting acoustic shadows in ultrasound images is important in many clinical and engineering applications. Real-time feedback of acoustic shadows can guide sonographers to a standardized diagnostic viewing plane with minimal artifacts and can provide additional information for other automatic image analysis algorithms. However, automatically detecting shadow regions using learning-based algorithms is challenging because pixel-wise ground truth annotation of acoustic shadows is subjective and time consuming. In this paper we propose a weakly supervised method for automatic confidence estimation of acoustic shadow regions. Our method is able to generate a dense shadow-focused confidence map. In our method, a shadow-seg module is built to learn general shadow features for shadow segmentation, based on global image-level annotations as well as a small number of coarse pixel-wise shadow annotations. A transfer function is introduced to extend the obtained binary shadow segmentation to a reference confidence map. Additionally, a confidence estimation network is proposed to learn the mapping between input images and the reference confidence maps. This network is able to predict shadow confidence maps directly from input images during inference. We use evaluation metrics such as DICE, inter-class correlation and etc. to verify the effectiveness of our method. Our method is more consistent than human annotation, and outperforms the state-of-the-art quantitatively in shadow segmentation and qualitatively in confidence estimation of shadow regions. We further demonstrate the applicability of our method by integrating shadow confidence maps into tasks such as ultrasound image classification, multi-view image fusion and automated biometric measurements. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08164v3-abstract-full').style.display = 'none'; document.getElementById('1811.08164v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 May, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1808.00793">arXiv:1808.00793</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1808.00793">pdf</a>, <a href="https://arxiv.org/format/1808.00793">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Weakly Supervised Localisation for Fetal Ultrasound Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Toussaint%2C+N">Nicolas Toussaint</a>, <a href="/search/cs?searchtype=author&amp;query=Khanal%2C+B">Bishesh Khanal</a>, <a href="/search/cs?searchtype=author&amp;query=Sinclair%2C+M">Matthew Sinclair</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Skelton%2C+E">Emily Skelton</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1808.00793v1-abstract-short" style="display: inline;"> This paper addresses the task of detecting and localising fetal anatomical regions in 2D ultrasound images, where only image-level labels are present at training, i.e. without any localisation or segmentation information. We examine the use of convolutional neural network architectures coupled with soft proposal layers. The resulting network simultaneously performs anatomical region detection (cla&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.00793v1-abstract-full').style.display = 'inline'; document.getElementById('1808.00793v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1808.00793v1-abstract-full" style="display: none;"> This paper addresses the task of detecting and localising fetal anatomical regions in 2D ultrasound images, where only image-level labels are present at training, i.e. without any localisation or segmentation information. We examine the use of convolutional neural network architectures coupled with soft proposal layers. The resulting network simultaneously performs anatomical region detection (classification) and localisation tasks. We generate a proposal map describing the attention of the network for a particular class. The network is trained on 85,500 2D fetal Ultrasound images and their associated labels. Labels correspond to six anatomical regions: head, spine, thorax, abdomen, limbs, and placenta. Detection achieves an average accuracy of 90\% on individual regions, and show that the proposal maps correlate well with relevant anatomical structures. This work presents itself as a powerful and essential step towards subsequent tasks such as fetal position and pose estimation, organ-specific segmentation, or image-guided navigation. Code and additional material is available at https://ntoussaint.github.io/fetalnav <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.00793v1-abstract-full').style.display = 'none'; document.getElementById('1808.00793v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 August, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4th Workshop on Deep Learning for Medical Image Analysis, MICCAI 2018, Granada, Spain</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1807.10583">arXiv:1807.10583</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1807.10583">pdf</a>, <a href="https://arxiv.org/format/1807.10583">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> EchoFusion: Tracking and Reconstruction of Objects in 4D Freehand Ultrasound Imaging without External Trackers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Khanal%2C+B">Bishesh Khanal</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+A">Alberto Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Toussaint%2C+N">Nicolas Toussaint</a>, <a href="/search/cs?searchtype=author&amp;query=McDonagh%2C+S">Steven McDonagh</a>, <a href="/search/cs?searchtype=author&amp;query=Zimmer%2C+V">Veronika Zimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Skelton%2C+E">Emily Skelton</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Grzech%2C+D">Daniel Grzech</a>, <a href="/search/cs?searchtype=author&amp;query=Wright%2C+R">Robert Wright</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+C">Chandni Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+B">Benjamin Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+J+A">Julia A. Schnabel</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1807.10583v1-abstract-short" style="display: inline;"> Ultrasound (US) is the most widely used fetal imaging technique. However, US images have limited capture range, and suffer from view dependent artefacts such as acoustic shadows. Compounding of overlapping 3D US acquisitions into a high-resolution volume can extend the field of view and remove image artefacts, which is useful for retrospective analysis including population based studies. However,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.10583v1-abstract-full').style.display = 'inline'; document.getElementById('1807.10583v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1807.10583v1-abstract-full" style="display: none;"> Ultrasound (US) is the most widely used fetal imaging technique. However, US images have limited capture range, and suffer from view dependent artefacts such as acoustic shadows. Compounding of overlapping 3D US acquisitions into a high-resolution volume can extend the field of view and remove image artefacts, which is useful for retrospective analysis including population based studies. However, such volume reconstructions require information about relative transformations between probe positions from which the individual volumes were acquired. In prenatal US scans, the fetus can move independently from the mother, making external trackers such as electromagnetic or optical tracking unable to track the motion between probe position and the moving fetus. We provide a novel methodology for image-based tracking and volume reconstruction by combining recent advances in deep learning and simultaneous localisation and mapping (SLAM). Tracking semantics are established through the use of a Residual 3D U-Net and the output is fed to the SLAM algorithm. As a proof of concept, experiments are conducted on US volumes taken from a whole body fetal phantom, and from the heads of real fetuses. For the fetal head segmentation, we also introduce a novel weak annotation approach to minimise the required manual effort for ground truth annotation. We evaluate our method qualitatively, and quantitatively with respect to tissue discrimination accuracy and tracking robustness. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.10583v1-abstract-full').style.display = 'none'; document.getElementById('1807.10583v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 July, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">MICCAI Workshop on Perinatal, Preterm and Paediatric Image analysis (PIPPI), 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.07486">arXiv:1806.07486</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1806.07486">pdf</a>, <a href="https://arxiv.org/format/1806.07486">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-00928-1_45">10.1007/978-3-030-00928-1_45 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Standard Plane Detection in 3D Fetal Ultrasound Using an Iterative Transformation Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yuanwei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Khanal%2C+B">Bishesh Khanal</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+B">Benjamin Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Alansary%2C+A">Amir Alansary</a>, <a href="/search/cs?searchtype=author&amp;query=Cerrolaza%2C+J+J">Juan J. Cerrolaza</a>, <a href="/search/cs?searchtype=author&amp;query=Sinclair%2C+M">Matthew Sinclair</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+C">Chandni Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Knight%2C+C">Caroline Knight</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.07486v2-abstract-short" style="display: inline;"> Standard scan plane detection in fetal brain ultrasound (US) forms a crucial step in the assessment of fetal development. In clinical settings, this is done by manually manoeuvring a 2D probe to the desired scan plane. With the advent of 3D US, the entire fetal brain volume containing these standard planes can be easily acquired. However, manual standard plane identification in 3D volume is labour&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.07486v2-abstract-full').style.display = 'inline'; document.getElementById('1806.07486v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1806.07486v2-abstract-full" style="display: none;"> Standard scan plane detection in fetal brain ultrasound (US) forms a crucial step in the assessment of fetal development. In clinical settings, this is done by manually manoeuvring a 2D probe to the desired scan plane. With the advent of 3D US, the entire fetal brain volume containing these standard planes can be easily acquired. However, manual standard plane identification in 3D volume is labour-intensive and requires expert knowledge of fetal anatomy. We propose a new Iterative Transformation Network (ITN) for the automatic detection of standard planes in 3D volumes. ITN uses a convolutional neural network to learn the relationship between a 2D plane image and the transformation parameters required to move that plane towards the location/orientation of the standard plane in the 3D volume. During inference, the current plane image is passed iteratively to the network until it converges to the standard plane location. We explore the effect of using different transformation representations as regression outputs of ITN. Under a multi-task learning framework, we introduce additional classification probability outputs to the network to act as confidence measures for the regressed transformation parameters in order to further improve the localisation accuracy. When evaluated on 72 US volumes of fetal brain, our method achieves an error of 3.83mm/12.7 degrees and 3.80mm/12.6 degrees for the transventricular and transcerebellar planes respectively and takes 0.46s per plane. Source code is publicly available at https://github.com/yuanwei1989/plane-detection. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.07486v2-abstract-full').style.display = 'none'; document.getElementById('1806.07486v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 2 figures, accepted for MICCAI 2018; Added link to source code</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> LNCS 11070 (2018) 392-400 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.06987">arXiv:1806.06987</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1806.06987">pdf</a>, <a href="https://arxiv.org/format/1806.06987">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-00928-1_64">10.1007/978-3-030-00928-1_64 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fast Multiple Landmark Localisation Using a Patch-based Iterative Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yuanwei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Alansary%2C+A">Amir Alansary</a>, <a href="/search/cs?searchtype=author&amp;query=Cerrolaza%2C+J+J">Juan J. Cerrolaza</a>, <a href="/search/cs?searchtype=author&amp;query=Khanal%2C+B">Bishesh Khanal</a>, <a href="/search/cs?searchtype=author&amp;query=Sinclair%2C+M">Matthew Sinclair</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+C">Chandni Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Knight%2C+C">Caroline Knight</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.06987v2-abstract-short" style="display: inline;"> We propose a new Patch-based Iterative Network (PIN) for fast and accurate landmark localisation in 3D medical volumes. PIN utilises a Convolutional Neural Network (CNN) to learn the spatial relationship between an image patch and anatomical landmark positions. During inference, patches are repeatedly passed to the CNN until the estimated landmark position converges to the true landmark location.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.06987v2-abstract-full').style.display = 'inline'; document.getElementById('1806.06987v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1806.06987v2-abstract-full" style="display: none;"> We propose a new Patch-based Iterative Network (PIN) for fast and accurate landmark localisation in 3D medical volumes. PIN utilises a Convolutional Neural Network (CNN) to learn the spatial relationship between an image patch and anatomical landmark positions. During inference, patches are repeatedly passed to the CNN until the estimated landmark position converges to the true landmark location. PIN is computationally efficient since the inference stage only selectively samples a small number of patches in an iterative fashion rather than a dense sampling at every location in the volume. Our approach adopts a multi-task learning framework that combines regression and classification to improve localisation accuracy. We extend PIN to localise multiple landmarks by using principal component analysis, which models the global anatomical relationships between landmarks. We have evaluated PIN using 72 3D ultrasound images from fetal screening examinations. PIN achieves quantitatively an average landmark localisation error of 5.59mm and a runtime of 0.44s to predict 10 landmarks per volume. Qualitatively, anatomical 2D standard scan planes derived from the predicted landmark locations are visually similar to the clinical ground truth. Source code is publicly available at https://github.com/yuanwei1989/landmark-detection. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.06987v2-abstract-full').style.display = 'none'; document.getElementById('1806.06987v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 4 figures, Accepted for MICCAI 2018</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> LNCS 11070 (2018) 563-571 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1804.09102">arXiv:1804.09102</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1804.09102">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Human-level Performance On Automatic Head Biometrics In Fetal Ultrasound Using Fully Convolutional Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sinclair%2C+M">Matthew Sinclair</a>, <a href="/search/cs?searchtype=author&amp;query=Baumgartner%2C+C+F">Christian F. Baumgartner</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+W">Wenjia Bai</a>, <a href="/search/cs?searchtype=author&amp;query=Martinez%2C+J+C">Juan Cerrolaza Martinez</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yuanwei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Smith%2C+S">Sandra Smith</a>, <a href="/search/cs?searchtype=author&amp;query=Knight%2C+C+L">Caroline L. Knight</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/cs?searchtype=author&amp;query=Hajnal%2C+J">Jo Hajnal</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+A+P">Andrew P. King</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1804.09102v1-abstract-short" style="display: inline;"> Measurement of head biometrics from fetal ultrasonography images is of key importance in monitoring the healthy development of fetuses. However, the accurate measurement of relevant anatomical structures is subject to large inter-observer variability in the clinic. To address this issue, an automated method utilizing Fully Convolutional Networks (FCN) is proposed to determine measurements of fetal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.09102v1-abstract-full').style.display = 'inline'; document.getElementById('1804.09102v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1804.09102v1-abstract-full" style="display: none;"> Measurement of head biometrics from fetal ultrasonography images is of key importance in monitoring the healthy development of fetuses. However, the accurate measurement of relevant anatomical structures is subject to large inter-observer variability in the clinic. To address this issue, an automated method utilizing Fully Convolutional Networks (FCN) is proposed to determine measurements of fetal head circumference (HC) and biparietal diameter (BPD). An FCN was trained on approximately 2000 2D ultrasound images of the head with annotations provided by 45 different sonographers during routine screening examinations to perform semantic segmentation of the head. An ellipse is fitted to the resulting segmentation contours to mimic the annotation typically produced by a sonographer. The model&#39;s performance was compared with inter-observer variability, where two experts manually annotated 100 test images. Mean absolute model-expert error was slightly better than inter-observer error for HC (1.99mm vs 2.16mm), and comparable for BPD (0.61mm vs 0.59mm), as well as Dice coefficient (0.980 vs 0.980). Our results demonstrate that the model performs at a level similar to a human expert, and learns to produce accurate predictions from a large dataset annotated by many sonographers. Additionally, measurements are generated in near real-time at 15fps on a GPU, which could speed up clinical workflow for both skilled and trainee sonographers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.09102v1-abstract-full').style.display = 'none'; document.getElementById('1804.09102v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">EMBC 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1804.05338">arXiv:1804.05338</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1804.05338">pdf</a>, <a href="https://arxiv.org/format/1804.05338">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Attention-Gated Networks for Improving Ultrasound Scan Plane Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Schlemper%2C+J">Jo Schlemper</a>, <a href="/search/cs?searchtype=author&amp;query=Oktay%2C+O">Ozan Oktay</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Liang Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Knight%2C+C">Caroline Knight</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/cs?searchtype=author&amp;query=Glocker%2C+B">Ben Glocker</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1804.05338v1-abstract-short" style="display: inline;"> In this work, we apply an attention-gated network to real-time automated scan plane detection for fetal ultrasound screening. Scan plane detection in fetal ultrasound is a challenging problem due the poor image quality resulting in low interpretability for both clinicians and automated algorithms. To solve this, we propose incorporating self-gated soft-attention mechanisms. A soft-attention mechan&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.05338v1-abstract-full').style.display = 'inline'; document.getElementById('1804.05338v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1804.05338v1-abstract-full" style="display: none;"> In this work, we apply an attention-gated network to real-time automated scan plane detection for fetal ultrasound screening. Scan plane detection in fetal ultrasound is a challenging problem due the poor image quality resulting in low interpretability for both clinicians and automated algorithms. To solve this, we propose incorporating self-gated soft-attention mechanisms. A soft-attention mechanism generates a gating signal that is end-to-end trainable, which allows the network to contextualise local information useful for prediction. The proposed attention mechanism is generic and it can be easily incorporated into any existing classification architectures, while only requiring a few additional parameters. We show that, when the base network has a high capacity, the incorporated attention mechanism can provide efficient object localisation while improving the overall performance. When the base network has a low capacity, the method greatly outperforms the baseline approach and significantly reduces false positives. Lastly, the generated attention maps allow us to understand the model&#39;s reasoning process, which can also be used for weakly supervised object localisation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.05338v1-abstract-full').style.display = 'none'; document.getElementById('1804.05338v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to MIDL2018 (OpenReview: https://openreview.net/forum?id=BJtn7-3sM)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1612.05601">arXiv:1612.05601</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1612.05601">pdf</a>, <a href="https://arxiv.org/format/1612.05601">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SonoNet: Real-Time Detection and Localisation of Fetal Standard Scan Planes in Freehand Ultrasound </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Baumgartner%2C+C+F">Christian F. Baumgartner</a>, <a href="/search/cs?searchtype=author&amp;query=Kamnitsas%2C+K">Konstantinos Kamnitsas</a>, <a href="/search/cs?searchtype=author&amp;query=Matthew%2C+J">Jacqueline Matthew</a>, <a href="/search/cs?searchtype=author&amp;query=Fletcher%2C+T+P">Tara P. Fletcher</a>, <a href="/search/cs?searchtype=author&amp;query=Smith%2C+S">Sandra Smith</a>, <a href="/search/cs?searchtype=author&amp;query=Koch%2C+L+M">Lisa M. Koch</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a>, <a href="/search/cs?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1612.05601v2-abstract-short" style="display: inline;"> Identifying and interpreting fetal standard scan planes during 2D ultrasound mid-pregnancy examinations are highly complex tasks which require years of training. Apart from guiding the probe to the correct location, it can be equally difficult for a non-expert to identify relevant structures within the image. Automatic image processing can provide tools to help experienced as well as inexperienced&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1612.05601v2-abstract-full').style.display = 'inline'; document.getElementById('1612.05601v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1612.05601v2-abstract-full" style="display: none;"> Identifying and interpreting fetal standard scan planes during 2D ultrasound mid-pregnancy examinations are highly complex tasks which require years of training. Apart from guiding the probe to the correct location, it can be equally difficult for a non-expert to identify relevant structures within the image. Automatic image processing can provide tools to help experienced as well as inexperienced operators with these tasks. In this paper, we propose a novel method based on convolutional neural networks which can automatically detect 13 fetal standard views in freehand 2D ultrasound data as well as provide a localisation of the fetal structures via a bounding box. An important contribution is that the network learns to localise the target anatomy using weak supervision based on image-level labels only. The network architecture is designed to operate in real-time while providing optimal output for the localisation task. We present results for real-time annotation, retrospective frame retrieval from saved videos, and localisation on a very large and challenging dataset consisting of images and video recordings of full clinical anomaly screenings. We found that the proposed method achieved an average F1-score of 0.798 in a realistic classification experiment modelling real-time detection, and obtained a 90.09% accuracy for retrospective frame retrieval. Moreover, an accuracy of 77.8% was achieved on the localisation task. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1612.05601v2-abstract-full').style.display = 'none'; document.getElementById('1612.05601v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 December, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 8 figures, published in IEEE Transactions in Medical Imaging</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10