<!--
  Scrape artifacts preserved from the original capture (invalid as bare text
  before the doctype, so wrapped in a comment):
  CINXE.COM
  Search | arXiv e-print repository
-->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;17 of 17 results for author: <span class="mathjax">Yao, B</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/eess" aria-role="search"> Searching in archive <strong>eess</strong>. <a href="/search/?searchtype=author&amp;query=Yao%2C+B">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Yao, B"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Yao%2C+B&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Yao, B"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13696">arXiv:2409.13696</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.13696">pdf</a>, <a href="https://arxiv.org/format/2409.13696">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Implicit Neural Representation for Sparse-view Photoacoustic Computed Tomography </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bowei Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Cui%2C+S">Shilong Cui</a>, <a href="/search/eess?searchtype=author&amp;query=Dai%2C+H">Haizhao Dai</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+Q">Qing Wu</a>, <a href="/search/eess?searchtype=author&amp;query=Xiao%2C+Y">Youshen Xiao</a>, <a href="/search/eess?searchtype=author&amp;query=Gao%2C+F">Fei Gao</a>, <a href="/search/eess?searchtype=author&amp;query=Yu%2C+J">Jingyi Yu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Y">Yuyao Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Cai%2C+X">Xiran Cai</a> </p> <p class="abstract 
mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13696v1-abstract-short" style="display: inline;"> High-quality imaging in photoacoustic computed tomography (PACT) usually requires a high-channel count system for dense spatial sampling around the object to avoid aliasing-related artefacts. To reduce system complexity, various image reconstruction approaches, such as model-based (MB) and deep learning based methods, have been explored to mitigate the artefacts associated with sparse-view acquisi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13696v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13696v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13696v1-abstract-full" style="display: none;"> High-quality imaging in photoacoustic computed tomography (PACT) usually requires a high-channel count system for dense spatial sampling around the object to avoid aliasing-related artefacts. To reduce system complexity, various image reconstruction approaches, such as model-based (MB) and deep learning based methods, have been explored to mitigate the artefacts associated with sparse-view acquisition. However, the explored methods formulated the reconstruction problem in a discrete framework, making it prone to measurement errors, discretization errors, and the extent of the ill-posedness of the problem scales with the discretization resolution. In this work, an implicit neural representation (INR) framework is proposed for image reconstruction in PACT with ring transducer arrays to address these issues. Specifically, the initial heat distribution is represented as a continuous function of spatial coordinates using a multi-layer perceptron (MLP). 
The weights of the MLP are then determined by a training process in a self-supervised manner, by minimizing the errors between the measured and model predicted PA signals. After training, PA images can be mapped by feeding the coordinates to the network. Simulation and phantom experiments showed that the INR method performed best in preserving image fidelity and in artefacts suppression for the same acquisition condition, compared to universal back-projection and MB methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13696v1-abstract-full').style.display = 'none'; document.getElementById('2409.13696v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2406.17578</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.05118">arXiv:2409.05118</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.05118">pdf</a>, <a href="https://arxiv.org/format/2409.05118">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Physics-augmented Deep Learning with Adversarial Domain Adaptation: Applications to STM Image Denoising </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/eess?searchtype=author&amp;query=Xie%2C+J">Jianxin Xie</a>, <a href="/search/eess?searchtype=author&amp;query=Ko%2C+W">Wonhee Ko</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+R">Rui-Xing Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bing Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.05118v2-abstract-short" style="display: inline;"> Image denoising is a critical task in various scientific fields such as medical imaging and material characterization, where the accurate recovery of underlying structures from noisy data is essential. Although supervised denoising techniques have achieved significant advancements, they typically require large datasets of paired clean-noisy images for training. Unsupervised methods, while not reli&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.05118v2-abstract-full').style.display = 'inline'; document.getElementById('2409.05118v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.05118v2-abstract-full" style="display: none;"> Image denoising is a critical task in various scientific fields such as medical imaging and material characterization, where the accurate recovery of underlying structures from noisy data is essential. Although supervised denoising techniques have achieved significant advancements, they typically require large datasets of paired clean-noisy images for training. Unsupervised methods, while not reliant on paired data, typically necessitate a set of unpaired clean images for training, which are not always accessible. 
In this paper, we propose a physics-augmented deep learning with adversarial domain adaption (PDA-Net) framework for unsupervised image denoising, with applications to denoise real-world scanning tunneling microscopy (STM) images. Our PDA-Net leverages the underlying physics to simulate and envision the ground truth for denoised STM images. Additionally, built upon Generative Adversarial Networks (GANs), we incorporate a cycle-consistency module and a domain adversarial module into our PDA-Net to address the challenge of lacking paired training data and achieve information transfer between the simulated and real experimental domains. Finally, we propose to implement feature alignment and weight-sharing techniques to fully exploit the similarity between simulated and real experimental images, thereby enhancing the denoising performance in both the simulation and experimental domains. Experimental results demonstrate that the proposed PDA-Net successfully enhances the quality of STM images, offering promising applications to enhance scientific discovery and accelerate experimental quantum material research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.05118v2-abstract-full').style.display = 'none'; document.getElementById('2409.05118v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.19544">arXiv:2407.19544</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.19544">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Deep Generative Models-Assisted Automated Labeling for Electron Microscopy Images Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Yuan%2C+W">Wenhao Yuan</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bingqing Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Tan%2C+S">Shengdong Tan</a>, <a href="/search/eess?searchtype=author&amp;query=You%2C+F">Fengqi You</a>, <a href="/search/eess?searchtype=author&amp;query=He%2C+Q">Qian He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.19544v1-abstract-short" style="display: inline;"> The rapid advancement of deep learning has facilitated the automated processing of electron microscopy (EM) big data stacks. However, designing a framework that eliminates manual labeling and adapts to domain gaps remains challenging. Current research remains entangled in the dilemma of pursuing complete automation while still requiring simulations or slight manual annotations. 
Here we demonstrate&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.19544v1-abstract-full').style.display = 'inline'; document.getElementById('2407.19544v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.19544v1-abstract-full" style="display: none;"> The rapid advancement of deep learning has facilitated the automated processing of electron microscopy (EM) big data stacks. However, designing a framework that eliminates manual labeling and adapts to domain gaps remains challenging. Current research remains entangled in the dilemma of pursuing complete automation while still requiring simulations or slight manual annotations. Here we demonstrate tandem generative adversarial network (tGAN), a fully label-free and simulation-free pipeline capable of generating EM images for computer vision training. The tGAN can assimilate key features from new data stacks, thus producing a tailored virtual dataset for the training of automated EM analysis tools. Using segmenting nanoparticles for analyzing size distribution of supported catalysts as the demonstration, our findings showcased that the recognition accuracy of tGAN even exceeds the manually-labeling method by 5%. It can also be adaptively deployed to various data domains without further manual manipulation, which is verified by transfer learning from HAADF-STEM to BF-TEM. This generalizability may enable it to extend its application to a broader range of imaging characterizations, liberating microscopists and materials scientists from tedious dataset annotations. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.19544v1-abstract-full').style.display = 'none'; document.getElementById('2407.19544v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.03772">arXiv:2407.03772</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.03772">pdf</a>, <a href="https://arxiv.org/format/2407.03772">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> CS3: Cascade SAM for Sperm Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Shi%2C+Y">Yi Shi</a>, <a href="/search/eess?searchtype=author&amp;query=Tian%2C+X">Xu-Peng Tian</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+Y">Yun-Kai Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+T">Tie-Yi Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bin Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+H">Hui Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Shao%2C+Y">Yong Shao</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+C">Cen-Cen Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Zeng%2C+R">Rong Zeng</a>, <a 
href="/search/eess?searchtype=author&amp;query=Zhan%2C+D">De-Chuan Zhan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.03772v2-abstract-short" style="display: inline;"> Automated sperm morphology analysis plays a crucial role in the assessment of male fertility, yet its efficacy is often compromised by the challenges in accurately segmenting sperm images. Existing segmentation techniques, including the Segment Anything Model(SAM), are notably inadequate in addressing the complex issue of sperm overlap-a frequent occurrence in clinical samples. Our exploratory stu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03772v2-abstract-full').style.display = 'inline'; document.getElementById('2407.03772v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.03772v2-abstract-full" style="display: none;"> Automated sperm morphology analysis plays a crucial role in the assessment of male fertility, yet its efficacy is often compromised by the challenges in accurately segmenting sperm images. Existing segmentation techniques, including the Segment Anything Model(SAM), are notably inadequate in addressing the complex issue of sperm overlap-a frequent occurrence in clinical samples. Our exploratory studies reveal that modifying image characteristics by removing sperm heads and easily segmentable areas, alongside enhancing the visibility of overlapping regions, markedly enhances SAM&#39;s efficiency in segmenting intricate sperm structures. Motivated by these findings, we present the Cascade SAM for Sperm Segmentation (CS3), an unsupervised approach specifically designed to tackle the issue of sperm overlap. This method employs a cascade application of SAM to segment sperm heads, simple tails, and complex tails in stages. 
Subsequently, these segmented masks are meticulously matched and joined to construct complete sperm masks. In collaboration with leading medical institutions, we have compiled a dataset comprising approximately 2,000 unlabeled sperm images to fine-tune our method, and secured expert annotations for an additional 240 images to facilitate comprehensive model assessment. Experimental results demonstrate superior performance of CS3 compared to existing methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03772v2-abstract-full').style.display = 'none'; document.getElementById('2407.03772v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Early accepted by MICCAI2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.19749">arXiv:2406.19749</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.19749">pdf</a>, <a href="https://arxiv.org/format/2406.19749">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SPIRONet: Spatial-Frequency Learning and Topological Channel Interaction Network for Vessel Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Huang%2C+D">De-Xing Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Zhou%2C+X">Xiao-Hu Zhou</a>, <a href="/search/eess?searchtype=author&amp;query=Xie%2C+X">Xiao-Liang Xie</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+S">Shi-Qi Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+S">Shuang-Yi Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+Z">Zhen-Qiu Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Gui%2C+M">Mei-Jiang Gui</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+H">Hao Li</a>, <a href="/search/eess?searchtype=author&amp;query=Xiang%2C+T">Tian-Yu Xiang</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bo-Xian Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Hou%2C+Z">Zeng-Guang Hou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short 
has-text-grey-dark mathjax" id="2406.19749v1-abstract-short" style="display: inline;"> Automatic vessel segmentation is paramount for developing next-generation interventional navigation systems. However, current approaches suffer from suboptimal segmentation performances due to significant challenges in intraoperative images (i.e., low signal-to-noise ratio, small or slender vessels, and strong interference). In this paper, a novel spatial-frequency learning and topological channel&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.19749v1-abstract-full').style.display = 'inline'; document.getElementById('2406.19749v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.19749v1-abstract-full" style="display: none;"> Automatic vessel segmentation is paramount for developing next-generation interventional navigation systems. However, current approaches suffer from suboptimal segmentation performances due to significant challenges in intraoperative images (i.e., low signal-to-noise ratio, small or slender vessels, and strong interference). In this paper, a novel spatial-frequency learning and topological channel interaction network (SPIRONet) is proposed to address the above issues. Specifically, dual encoders are utilized to comprehensively capture local spatial and global frequency vessel features. Then, a cross-attention fusion module is introduced to effectively fuse spatial and frequency features, thereby enhancing feature discriminability. Furthermore, a topological channel interaction module is designed to filter out task-irrelevant responses based on graph neural networks. Extensive experimental results on several challenging datasets (CADSA, CAXF, DCA1, and XCAD) demonstrate state-of-the-art performances of our method. 
Moreover, the inference speed of SPIRONet is 21 FPS with a 512x512 input size, surpassing clinical real-time requirements (6~12FPS). These promising outcomes indicate SPIRONet&#39;s potential for integration into vascular interventional navigation systems. Code is available at https://github.com/Dxhuang-CASIA/SPIRONet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.19749v1-abstract-full').style.display = 'none'; document.getElementById('2406.19749v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.19205">arXiv:2406.19205</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.19205">pdf</a>, <a href="https://arxiv.org/format/2406.19205">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Coordinated RSMA for Integrated Sensing and Communication in Emergency UAV Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Binghan Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+R">Ruoguang Li</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Y">Yingyang Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+L">Li Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.19205v1-abstract-short" style="display: inline;"> Recently, unmanned aerial vehicle (UAV)-enabled 
integrated sensing and communication (ISAC) is emerging as a promising technique for achieving robust and rapid emergency response capabilities. Such a novel framework offers high-quality and cost-efficient C\&amp;S services due to the intrinsic flexibility and mobility of UAVs. In parallel, rate-splitting multiple access (RSMA) is able to achieve a tail&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.19205v1-abstract-full').style.display = 'inline'; document.getElementById('2406.19205v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.19205v1-abstract-full" style="display: none;"> Recently, unmanned aerial vehicle (UAV)-enabled integrated sensing and communication (ISAC) is emerging as a promising technique for achieving robust and rapid emergency response capabilities. Such a novel framework offers high-quality and cost-efficient C\&amp;S services due to the intrinsic flexibility and mobility of UAVs. In parallel, rate-splitting multiple access (RSMA) is able to achieve a tailor-made communication by splitting the messages into private and common parts with adjustable rates, making it suitable for on-demand data transmission in disaster scenarios. In this paper, we propose a coordinated RSMA for integrated sensing and communication (CoRSMA-ISAC) scheme in emergency UAV system to facilitate search and rescue operations, where a number of ISAC UAVs simultaneously communicate with multiple communication survivors (CSs) and detect a potentially trapped survivor (TS) in a coordinated manner. Towards this end, an optimization problem is formulated to maximize the weighted sum rate (WSR) of the system, subject to the sensing signal-to-noise ratio (SNR) requirement. 
In order to solve the formulated non-convex problem, we first decompose it into three subproblems, i.e., UAV-CS association, UAV deployment, as well as beamforming optimization and rate allocation. Subsequently, we introduce an iterative optimization approach leveraging K-Means, successive convex approximation (SCA), and semi-definite relaxation (SDR) algorithms to reframe the subproblems into a more tractable form and efficiently solve them. Simulation results demonstrate that the proposed CoRSMA-ISAC scheme is superior to conventional space division multiple access (SDMA), non-orthogonal multiple access (NOMA), and orthogonal multiple access (OMA) in terms of both communication and sensing performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.19205v1-abstract-full').style.display = 'none'; document.getElementById('2406.19205v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.17578">arXiv:2406.17578</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.17578">pdf</a>, <a href="https://arxiv.org/format/2406.17578">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Sparse-view Signal-domain Photoacoustic Tomography Reconstruction Method Based on Neural Representation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bowei Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Zeng%2C+Y">Yi Zeng</a>, <a href="/search/eess?searchtype=author&amp;query=Dai%2C+H">Haizhao Dai</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+Q">Qing Wu</a>, <a href="/search/eess?searchtype=author&amp;query=Xiao%2C+Y">Youshen Xiao</a>, <a href="/search/eess?searchtype=author&amp;query=Gao%2C+F">Fei Gao</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Y">Yuyao Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Yu%2C+J">Jingyi Yu</a>, <a href="/search/eess?searchtype=author&amp;query=Cai%2C+X">Xiran Cai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.17578v1-abstract-short" style="display: inline;"> Photoacoustic tomography is a hybrid biomedical technology, which combines the advantages of acoustic and optical imaging. However, for the conventional image reconstruction method, the image quality is affected obviously by artifacts under the condition of sparse sampling. 
In this paper, a novel model-based sparse reconstruction method via implicit neural representation was proposed for improving&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.17578v1-abstract-full').style.display = 'inline'; document.getElementById('2406.17578v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.17578v1-abstract-full" style="display: none;"> Photoacoustic tomography is a hybrid biomedical technology, which combines the advantages of acoustic and optical imaging. However, for the conventional image reconstruction method, the image quality is affected obviously by artifacts under the condition of sparse sampling. In this paper, a novel model-based sparse reconstruction method via implicit neural representation was proposed for improving the image quality reconstructed from sparse data. Specifically, the initial acoustic pressure distribution was modeled as a continuous function of spatial coordinates, and parameterized by a multi-layer perceptron. The weights of the multi-layer perceptron were determined by training the network in a self-supervised manner. The total variation regularization term was used to offer the prior knowledge. We compared our result with some ablation studies, and the results show that our method outperforms existing methods on simulation and experimental data. Under the sparse sampling condition, our method can suppress the artifacts and avoid the ill-posed problem effectively, reconstructing images with higher signal-to-noise ratio and contrast-to-noise ratio than traditional methods. The high-quality results for sparse data make the proposed method hold the potential for further decreasing the hardware cost of photoacoustic tomography system. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.17578v1-abstract-full').style.display = 'none'; document.getElementById('2406.17578v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.08154">arXiv:2403.08154</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.08154">pdf</a>, <a href="https://arxiv.org/format/2403.08154">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> The Effect of Different Optimization Strategies to Physics-Constrained Deep Learning for Soil Moisture Estimation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Xie%2C+J">Jianxin Xie</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bing Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Jiang%2C+Z">Zheyu Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.08154v1-abstract-short" style="display: inline;"> Soil moisture is a key hydrological parameter that has significant importance to human society and the environment. 
Accurate modeling and monitoring of soil moisture in crop fields, especially in the root zone (top 100 cm of soil), is essential for improving agricultural production and crop yield with the help of precision irrigation and farming tools. Realizing the full sensor data potential depe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.08154v1-abstract-full').style.display = 'inline'; document.getElementById('2403.08154v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.08154v1-abstract-full" style="display: none;"> Soil moisture is a key hydrological parameter that has significant importance to human society and the environment. Accurate modeling and monitoring of soil moisture in crop fields, especially in the root zone (top 100 cm of soil), is essential for improving agricultural production and crop yield with the help of precision irrigation and farming tools. Realizing the full sensor data potential depends greatly on advanced analytical and predictive domain-aware models. In this work, we propose a physics-constrained deep learning (P-DL) framework to integrate physics-based principles on water transport and water sensing signals for effective reconstruction of the soil moisture dynamics. We adopt three different optimizers, namely Adam, RMSprop, and GD, to minimize the loss function of P-DL during the training process. In the illustrative case study, we demonstrate the empirical convergence of Adam optimizers outperforms the other optimization methods in both mini-batch and full-batch training. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.08154v1-abstract-full').style.display = 'none'; document.getElementById('2403.08154v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.07228">arXiv:2403.07228</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.07228">pdf</a>, <a href="https://arxiv.org/format/2403.07228">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Physics-constrained Active Learning for Soil Moisture Estimation and Optimal Sensor Placement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Xie%2C+J">Jianxin Xie</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bing Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Jiang%2C+Z">Zheyu Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.07228v1-abstract-short" style="display: inline;"> Soil moisture is a crucial hydrological state variable that has significant importance to the global environment and agriculture. Precise monitoring of soil moisture in crop fields is critical to reducing agricultural drought and improving crop yield. 
In-situ soil moisture sensors, which are buried at pre-determined depths and distributed across the field, are promising solutions for monitoring so&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07228v1-abstract-full').style.display = 'inline'; document.getElementById('2403.07228v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.07228v1-abstract-full" style="display: none;"> Soil moisture is a crucial hydrological state variable that has significant importance to the global environment and agriculture. Precise monitoring of soil moisture in crop fields is critical to reducing agricultural drought and improving crop yield. In-situ soil moisture sensors, which are buried at pre-determined depths and distributed across the field, are promising solutions for monitoring soil moisture. However, high-density sensor deployment is neither economically feasible nor practical. Thus, to achieve a higher spatial resolution of soil moisture dynamics using a limited number of sensors, we integrate a physics-based agro-hydrological model based on Richards&#39; equation in a physics-constrained deep learning framework to accurately predict soil moisture dynamics in the soil&#39;s root zone. This approach ensures that soil moisture estimates align well with sensor observations while obeying physical laws at the same time. Furthermore, to strategically identify the locations for sensor placement, we introduce a novel active learning framework that combines space-filling design and physics residual-based sampling to maximize data acquisition potential with limited sensors. 
Our numerical results demonstrate that integrating Physics-constrained Deep Learning (P-DL) with an active learning strategy within a unified framework--named the Physics-constrained Active Learning (P-DAL) framework--significantly improves the predictive accuracy and effectiveness of field-scale soil moisture monitoring using in-situ sensors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07228v1-abstract-full').style.display = 'none'; document.getElementById('2403.07228v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.15096">arXiv:2306.15096</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.15096">pdf</a>, <a href="https://arxiv.org/format/2306.15096">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Automated Identification of Atrial Fibrillation from Single-lead ECGs Using Multi-branching ResNet </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Xie%2C+J">Jianxin Xie</a>, <a href="/search/eess?searchtype=author&amp;query=Stavrakis%2C+S">Stavros Stavrakis</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bing Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.15096v1-abstract-short" style="display: inline;"> Atrial fibrillation (AF) is the most common cardiac arrhythmia, which is 
clinically identified with irregular and rapid heartbeat rhythm. AF puts a patient at risk of forming blood clots, which can eventually lead to heart failure, stroke, or even sudden death. It is of critical importance to develop an advanced analytical model that can effectively interpret the electrocardiography (ECG) signals&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.15096v1-abstract-full').style.display = 'inline'; document.getElementById('2306.15096v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.15096v1-abstract-full" style="display: none;"> Atrial fibrillation (AF) is the most common cardiac arrhythmia, which is clinically identified with irregular and rapid heartbeat rhythm. AF puts a patient at risk of forming blood clots, which can eventually lead to heart failure, stroke, or even sudden death. It is of critical importance to develop an advanced analytical model that can effectively interpret the electrocardiography (ECG) signals and provide decision support for accurate AF diagnostics. In this paper, we propose an innovative deep-learning method for automated AF identification from single-lead ECGs. We first engage the continuous wavelet transform (CWT) to extract time-frequency features from ECG signals. Then, we develop a convolutional neural network (CNN) structure that incorporates ResNet for effective network training and multi-branching architectures for addressing the imbalanced data issue to process the 2D time-frequency features for AF classification. We evaluate the proposed methodology using two real-world ECG databases. The experimental results show a superior performance of our method compared with traditional deep learning models. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.15096v1-abstract-full').style.display = 'none'; document.getElementById('2306.15096v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.11408">arXiv:2210.11408</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.11408">pdf</a>, <a href="https://arxiv.org/format/2210.11408">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Hierarchical Deep Learning with Generative Adversarial Network for Automatic Cardiac Diagnosis from ECG Signals </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Wang%2C+Z">Zekai Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Stavrakis%2C+S">Stavros Stavrakis</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Bing Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.11408v1-abstract-short" style="display: inline;"> Cardiac disease is the leading cause of death in the US. Accurate heart disease detection is of critical importance for timely medical treatment to save patients&#39; lives. 
Routine use of electrocardiogram (ECG) is the most common method for physicians to assess the electrical activities of the heart and detect possible abnormal cardiac conditions. Fully utilizing the ECG data for reliable heart dise&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.11408v1-abstract-full').style.display = 'inline'; document.getElementById('2210.11408v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.11408v1-abstract-full" style="display: none;"> Cardiac disease is the leading cause of death in the US. Accurate heart disease detection is of critical importance for timely medical treatment to save patients&#39; lives. Routine use of electrocardiogram (ECG) is the most common method for physicians to assess the electrical activities of the heart and detect possible abnormal cardiac conditions. Fully utilizing the ECG data for reliable heart disease detection depends on developing effective analytical models. In this paper, we propose a two-level hierarchical deep learning framework with Generative Adversarial Network (GAN) for automatic diagnosis of ECG signals. The first-level model is composed of a Memory-Augmented Deep auto-Encoder with GAN (MadeGAN), which aims to differentiate abnormal signals from normal ECGs for anomaly detection. The second-level learning aims at robust multi-class classification for different arrhythmias identification, which is achieved by integrating the transfer learning technique to transfer knowledge from the first-level learning with the multi-branching architecture to handle the data-lacking and imbalanced data issue. We evaluate the performance of the proposed framework using real-world medical data from the MIT-BIH arrhythmia database. Experimental results show that our proposed model outperforms existing methods that are commonly used in current practice. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.11408v1-abstract-full').style.display = 'none'; document.getElementById('2210.11408v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.08136">arXiv:2101.08136</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.08136">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/s11433-021-1730-x">10.1007/s11433-021-1730-x <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> High-throughput fast full-color digital pathology based on Fourier ptychographic microscopy via color transfer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Gao%2C+Y">Yuting Gao</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+J">Jiurun Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+A">Aiye Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Pan%2C+A">An Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Ma%2C+C">Caiwen Ma</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Baoli Yao</a> </p> <p 
class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.08136v1-abstract-short" style="display: inline;"> Full-color imaging is significant in digital pathology. Compared with a grayscale image or a pseudo-color image that only contains the contrast information, it can identify and detect the target object better with color texture information. Fourier ptychographic microscopy (FPM) is a high-throughput computational imaging technique that breaks the tradeoff between high resolution (HR) and large fie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.08136v1-abstract-full').style.display = 'inline'; document.getElementById('2101.08136v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.08136v1-abstract-full" style="display: none;"> Full-color imaging is significant in digital pathology. Compared with a grayscale image or a pseudo-color image that only contains the contrast information, it can identify and detect the target object better with color texture information. Fourier ptychographic microscopy (FPM) is a high-throughput computational imaging technique that breaks the tradeoff between high resolution (HR) and large field-of-view (FOV), which eliminates the artifacts of scanning and stitching in digital pathology and improves its imaging efficiency. However, the conventional full-color digital pathology based on FPM is still time-consuming due to the repeated experiments with tri-wavelengths. A color transfer FPM approach, termed CFPM was reported. The color texture information of a low resolution (LR) full-color pathologic image is directly transferred to the HR grayscale FPM image captured by only a single wavelength. 
The color space of FPM based on the standard CIE-XYZ color model and display based on the standard RGB (sRGB) color space were established. Different FPM colorization schemes were analyzed and compared with thirty different biological samples. The average root-mean-square error (RMSE) of the conventional method and CFPM compared with the ground truth is 5.3% and 5.7%, respectively. Therefore, the acquisition time is significantly reduced by 2/3 with the sacrifice of precision of only 0.4%. And CFPM method is also compatible with advanced fast FPM approaches to reduce computation time further. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.08136v1-abstract-full').style.display = 'none'; document.getElementById('2101.08136v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.03138">arXiv:2009.03138</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.03138">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.optlaseng.2022.107408">10.1016/j.optlaseng.2022.107408 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Edge effect removal in Fourier ptychographic microscopy via periodic plus smooth image decomposition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Pan%2C+A">An Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+A">Aiye Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Zheng%2C+J">Junfu Zheng</a>, <a href="/search/eess?searchtype=author&amp;query=Gao%2C+Y">Yuting Gao</a>, <a href="/search/eess?searchtype=author&amp;query=Ma%2C+C">Caiwen Ma</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Baoli Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.03138v3-abstract-short" style="display: inline;"> Fourier ptychographic microscopy (FPM) is a promising computational imaging technique with high resolution, wide field-of-view (FOV) and quantitative phase recovery. 
So far, a series of system errors that may corrupt the image quality of FPM has been reported. However, an imperceptible artifact caused by edge effect caught our attention and may also degrade the precision of phase imaging in FPM wi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.03138v3-abstract-full').style.display = 'inline'; document.getElementById('2009.03138v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.03138v3-abstract-full" style="display: none;"> Fourier ptychographic microscopy (FPM) is a promising computational imaging technique with high resolution, wide field-of-view (FOV) and quantitative phase recovery. So far, a series of system errors that may corrupt the image quality of FPM has been reported. However, an imperceptible artifact caused by edge effect caught our attention and may also degrade the precision of phase imaging in FPM with a cross-shape artifact in the Fourier space. We found that the precision of reconstructed phase at the same subregion depends on the different sizes of block processing as a result of different edge conditions, which limits the quantitative phase measurements via FPM. And this artifact is caused by the aperiodic image extension of fast Fourier transform (FFT). Herein, to remove the edge effect and improve the accuracy, two classes of opposite algorithms termed discrete cosine transform (DCT) and perfect Fourier transform (PFT) were reported respectively and discussed systematically. Although both approaches can remove the artifacts in FPM and may be extended to other Fourier analysis techniques, PFT has a comparable efficiency to conventional FFT. The PFT algorithm improves the standard deviation of phase accuracy as a factor of 4 from 0.08 radians to 0.02 radians. Finally, we summarized and discussed all the reported system errors of FPM within a generalized model. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.03138v3-abstract-full').style.display = 'none'; document.getElementById('2009.03138v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 8 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Optics and Lasers in Engineering 162, 107408 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.00388">arXiv:2008.00388</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2008.00388">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1364/OL.409258">10.1364/OL.409258 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Coherent synthetic aperture imaging for visible remote sensing via reflective Fourier ptychography </p> <p 
class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Xiang%2C+M">Meng Xiang</a>, <a href="/search/eess?searchtype=author&amp;query=Pan%2C+A">An Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+Y">Yiyi Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Fan%2C+X">Xuewu Fan</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+H">Hui Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+C">Chuang Li</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Baoli Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.00388v1-abstract-short" style="display: inline;"> Synthetic aperture radar (SAR) can measure the phase with antenna and microwave, which cannot be directly extended to visible light imaging due to phase lost. In this letter, we reported an active remote sensing with visible light via reflective Fourier ptychography (FP), termed coherent synthetic aperture imaging (CSAI), achieving high resolution, wide field-of-view (FOV) and phase recovery. A pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.00388v1-abstract-full').style.display = 'inline'; document.getElementById('2008.00388v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.00388v1-abstract-full" style="display: none;"> Synthetic aperture radar (SAR) can measure the phase with antenna and microwave, which cannot be directly extended to visible light imaging due to phase lost. In this letter, we reported an active remote sensing with visible light via reflective Fourier ptychography (FP), termed coherent synthetic aperture imaging (CSAI), achieving high resolution, wide field-of-view (FOV) and phase recovery. 
A proof-of-concept experiment was reported with laser scanning and a collimator for the infinite object. Both smooth and rough objects are tested, and the spatial resolution increased from 15.6 um to 3.48 um with a factor of 4.5. The speckle noise can be suppressed by FP unexpectedly. Meanwhile, the CSAI method may replace the adaptive optics to tackle the aberration induced from atmospheric turbulence and optical system by one-step deconvolution. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.00388v1-abstract-full').style.display = 'none'; document.getElementById('2008.00388v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1912.00804">arXiv:1912.00804</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1912.00804">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Biological Physics">physics.bio-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> In situ correction of liquid meniscus in cell culture imaging system based on parallel Fourier ptychographic microscopy (96 Eyes) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/eess?searchtype=author&amp;query=Pan%2C+A">An Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Chan%2C+A+C+S">Antony C. S. Chan</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Baoli Yao</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+C">Changhuei Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1912.00804v2-abstract-short" style="display: inline;"> We collaborated with Amgen and spent five years in designing and fabricating next generation multi-well plate imagers based on Fourier ptychographic microscopy (FPM). A 6-well imager (Emsight) and a low-cost parallel microscopic system (96 Eyes) based on parallel FPM were reported in our previous work. However, the effect of liquid meniscus on the image quality is much stronger than anticipated, i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.00804v2-abstract-full').style.display = 'inline'; document.getElementById('1912.00804v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1912.00804v2-abstract-full" style="display: none;"> We collaborated with Amgen and spent five years in designing and fabricating next generation multi-well plate imagers based on Fourier ptychographic microscopy (FPM). A 6-well imager (Emsight) and a low-cost parallel microscopic system (96 Eyes) based on parallel FPM were reported in our previous work. However, the effect of liquid meniscus on the image quality is much stronger than anticipated, introducing obvious wavevector misalignment and additional image aberration. To this end, an adaptive wavevector correction (AWC-FPM) algorithm and a pupil recovery improvement strategy are presented to solve these challenges in situ. 
In addition, dual-channel fluorescence excitation is added to obtain structural information for microbiologists. Experiments are demonstrated to verify their performances. The accuracy of angular resolution with our algorithm is within 0.003 rad. Our algorithms would make the FPM algorithm more robust and practical and can be extended to other FPM-based applications to overcome similar challenges. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.00804v2-abstract-full').style.display = 'none'; document.getElementById('1912.00804v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 December, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 12 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.08778">arXiv:1907.08778</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1907.08778">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" 
href="https://doi.org/10.1088/2040-8986/aba0fc">10.1088/2040-8986/aba0fc <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Retrieval of non-sparse object through scattering media beyond the memory effect </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhou%2C+M">Meiling Zhou</a>, <a href="/search/eess?searchtype=author&amp;query=Pan%2C+A">An Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+R">Runze Li</a>, <a href="/search/eess?searchtype=author&amp;query=Liang%2C+Y">Yansheng Liang</a>, <a href="/search/eess?searchtype=author&amp;query=Min%2C+J">Junwei Min</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tong Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Bai%2C+C">Chen Bai</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Baoli Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.08778v1-abstract-short" style="display: inline;"> Optical imaging through scattering media is commonly confronted with the problem of reconstruction of complex objects and optical memory effect. To solve the problem, here, we propose a novel configuration based on the combination of ptychography and shower-curtain effect, which enables the retrieval of non-sparse samples through scattering media beyond the memory effect. 
Furthermore, by virtue&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08778v1-abstract-full').style.display = 'inline'; document.getElementById('1907.08778v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.08778v1-abstract-full" style="display: none;"> Optical imaging through scattering media is commonly confronted with the problem of reconstruction of complex objects and optical memory effect. To solve the problem, here, we propose a novel configuration based on the combination of ptychography and shower-curtain effect, which enables the retrieval of non-sparse samples through scattering media beyond the memory effect. Furthermore, by virtue of the shower-curtain effect, the proposed imaging system is insensitive to dynamic scattering media. Results from the retrieval of hair follicle section demonstrate the effectiveness and feasibility of the proposed method. The field of view is improved to 2.64mm. This present technique will be a potential approach for imaging through deep biological tissue. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08778v1-abstract-full').style.display = 'none'; document.getElementById('1907.08778v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1709.07747">arXiv:1709.07747</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1709.07747">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> SNR-based adaptive acquisition method for fast Fourier ptychographic microscopy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Pan%2C+A">An Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Y">Yan Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+M">Maosen Li</a>, <a href="/search/eess?searchtype=author&amp;query=Zhou%2C+M">Meiling Zhou</a>, <a href="/search/eess?searchtype=author&amp;query=Min%2C+J">Junwei Min</a>, <a href="/search/eess?searchtype=author&amp;query=Lei%2C+M">Ming Lei</a>, <a href="/search/eess?searchtype=author&amp;query=Yao%2C+B">Baoli Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1709.07747v2-abstract-short" style="display: inline;"> Fourier ptychographic microscopy (FPM) is a computational imaging technique with both high resolution and large field-of-view. However, the effective numerical aperture (NA) achievable with a typical LED panel is ambiguous and usually relies on the repeated tests of different illumination NAs. 
The imaging quality of each raw image usually depends on the visual assessments, which is subjective and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.07747v2-abstract-full').style.display = 'inline'; document.getElementById('1709.07747v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1709.07747v2-abstract-full" style="display: none;"> Fourier ptychographic microscopy (FPM) is a computational imaging technique with both high resolution and large field-of-view. However, the effective numerical aperture (NA) achievable with a typical LED panel is ambiguous and usually relies on the repeated tests of different illumination NAs. The imaging quality of each raw image usually depends on the visual assessments, which is subjective and inaccurate especially for those dark field images. Moreover, the acquisition process is really time-consuming. In this paper, we propose an SNR-based adaptive acquisition method for quantitative evaluation and adaptive collection of each raw image according to the signal-to-noise ratio (SNR) value, to improve the FPM&#39;s acquisition efficiency and automatically obtain the maximum achievable NA, reducing the time of collection, storage and subsequent calculation. The widely used EPRY-FPM algorithm is applied without adding any algorithm complexity and computational burden. The performance has been demonstrated in both USAF targets and biological samples with different imaging sensors respectively, which have either Poisson or Gaussian noise models. Further combined with the sparse LEDs strategy, the number of collection images can be shortened to around 25 frames while the former needs 361 images, the reduction ratio can reach over 90%. This method will make FPM more practical and automatic, and can also be used in different configurations of FPM. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.07747v2-abstract-full').style.display = 'none'; document.getElementById('1709.07747v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 September, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 6 figures</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 
16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 
92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10