Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;45 of 45 results for author: <span class="mathjax">Peng, T</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/eess" aria-role="search"> Searching in archive <strong>eess</strong>. <a href="/search/?searchtype=author&amp;query=Peng%2C+T">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Peng, T"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Peng%2C+T&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Peng, T"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06510">arXiv:2502.06510</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06510">pdf</a>, <a href="https://arxiv.org/format/2502.06510">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Three-Dimensional MRI Reconstruction with Gaussian Representations: Tackling the Undersampling Problem </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tengya Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Zha%2C+R">Ruyi Zha</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Z">Zhen Li</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+X">Xiaofeng Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Zou%2C+Q">Qing Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06510v1-abstract-short" style="display: inline;"> Three-Dimensional Gaussian Splatting (3DGS) has shown substantial promise in the field of computer vision, but remains unexplored in the field of magnetic resonance imaging (MRI). This study explores its potential for the reconstruction of isotropic resolution 3D MRI from undersampled k-space data. We introduce a novel framework termed 3D Gaussian MRI (3DGSMR), which employs 3D Gaussian distributi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06510v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06510v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06510v1-abstract-full" style="display: none;"> Three-Dimensional Gaussian Splatting (3DGS) has shown substantial promise in the field of computer vision, but remains unexplored in the field of magnetic resonance imaging (MRI). This study explores its potential for the reconstruction of isotropic resolution 3D MRI from undersampled k-space data. We introduce a novel framework termed 3D Gaussian MRI (3DGSMR), which employs 3D Gaussian distributions as an explicit representation for MR volumes. Experimental evaluations indicate that this method can effectively reconstruct voxelized MR images, achieving a quality on par with that of well-established 3D MRI reconstruction techniques found in the literature. Notably, the 3DGSMR scheme operates under a self-supervised framework, obviating the need for extensive training datasets or prior model training. 
2. arXiv:2502.03974 [pdf]
   Subjects: eess.SY (Systems and Control)
   Title: Spatiotemporal Trajectory Tracking Method for Vehicles Incorporating Lead-Lag Judgement
   Authors: Yuan Li, Xiang Dong, Tao Li, Junfeng Hao, Xiaoxue Xu, Sana Ullaha, Yincai Cai, Peng Wu, Ting Peng
   Abstract: In the domain of intelligent transportation systems, especially within the context of autonomous vehicle control, the preemptive holistic collaborative system has been presented as a promising solution for remarkably enhancing traffic efficiency and substantially reducing the accident rate, demonstrating great development potential. To ensure this system operates as intended, accurate tracking of the spatiotemporal trajectory is of crucial significance, and minimizing the tracking error is a necessary step in this process. To this end, a novel lead-lag judgment mechanism is proposed. This mechanism precisely quantifies the longitudinal positional deviation between the vehicle and the target trajectory over time; the deviation is then corrected with a real-time acceleration compensation strategy, significantly enhancing the accuracy and reliability of trajectory tracking. Real-vehicle experiments were conducted in a dedicated test field to validate the feasibility of this approach empirically. The obtained tracking data were subsequently processed using the lead-lag judgment mechanism, and the spatiotemporal error patterns between the vehicle and the target trajectory were analyzed under different alignments and speeds. Finally, using real highway speed and alignment data, comprehensive spatiotemporal trajectory tracking simulations were conducted. Through experiments and simulations, tracking errors remained within an acceptable range and a reasonable spatiotemporal distance was maintained during the preemptive merging process on highway ramps. Overall, this study offers valuable insights for highway ramp merging safety. Future work can expand on these findings.
   Submitted 6 February, 2025; originally announced February 2025.
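The lead-lag judgement described above reduces to measuring whether the vehicle leads or lags its time-indexed target position and feeding that longitudinal deviation back as an acceleration correction. The toy function below illustrates that feedback idea; it is my own sketch, not the paper's controller, and the gains and saturation limit are assumed values.

```python
def lead_lag_compensation(s_actual, s_target, v_actual, v_target,
                          kp=0.5, kd=0.8, a_max=2.0):
    """Return an acceleration correction from the longitudinal deviation.

    s_actual / s_target: distance travelled along the path [m] (actual vs. planned)
    v_actual / v_target: current and planned speed [m/s]
    A positive deviation means the vehicle lags its spatiotemporal target.
    """
    lag = s_target - s_actual               # >0: lagging, <0: leading
    a_cmd = kp * lag + kd * (v_target - v_actual)
    return max(-a_max, min(a_max, a_cmd))   # saturate to a comfortable range

# Example: 1.5 m behind plan and 0.4 m/s too slow -> mild acceleration request
print(lead_lag_compensation(120.0, 121.5, 14.6, 15.0))
```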
3. arXiv:2412.15307 [pdf, other]
   Subjects: eess.IV (Image and Video Processing); cs.CV (Computer Vision and Pattern Recognition)
   Title: Federated Learning for Coronary Artery Plaque Detection in Atherosclerosis Using IVUS Imaging: A Multi-Hospital Collaboration
   Authors: Chiu-Han Hsiao, Kai Chen, Tsung-Yu Peng, Wei-Chieh Huang
   Abstract: The traditional interpretation of Intravascular Ultrasound (IVUS) images during Percutaneous Coronary Intervention (PCI) is time-intensive and inconsistent, relying heavily on physician expertise. Regulatory restrictions and privacy concerns further hinder data integration across hospital systems, complicating collaborative analysis. To address these challenges, a parallel 2D U-Net model with a multi-stage segmentation architecture has been developed, utilizing federated learning to enable secure data analysis across institutions while preserving privacy. The model segments plaques by identifying and subtracting the External Elastic Membrane (EEM) and lumen areas, with preprocessing converting Cartesian to polar coordinates for improved computational efficiency. Achieving a Dice Similarity Coefficient (DSC) of 0.706, the model effectively identifies plaques and detects circular boundaries in real-time. Collaborative efforts with domain experts enhance plaque burden interpretation through precise quantitative measurements. Future advancements may involve integrating advanced federated learning techniques and expanding datasets to further improve performance and applicability. This adaptable technology holds promise for environments handling sensitive, distributed data, offering potential to optimize outcomes in medical imaging and intervention.
   Submitted 19 December, 2024; originally announced December 2024.
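The pipeline above mentions converting IVUS frames from Cartesian to polar coordinates before segmentation, which turns roughly circular lumen/EEM boundaries into near-horizontal bands. The generic resampling sketch below shows one way that preprocessing step can be done; the grid sizes and interpolation order are assumptions, and this is not the authors' code.

```python
import numpy as np
from scipy.ndimage import map_coordinates

def cartesian_to_polar(img, n_radii=256, n_angles=360):
    """Resample a square IVUS frame onto an (angle, radius) grid."""
    h, w = img.shape
    cy, cx = (h - 1) / 2.0, (w - 1) / 2.0
    radii  = np.linspace(0, min(cy, cx), n_radii)
    angles = np.linspace(0, 2 * np.pi, n_angles, endpoint=False)
    rr, aa = np.meshgrid(radii, angles)              # (n_angles, n_radii)
    ys = cy + rr * np.sin(aa)
    xs = cx + rr * np.cos(aa)
    return map_coordinates(img, [ys, xs], order=1)   # bilinear sampling

# Example with a dummy frame; circular structures become horizontal bands.
frame = np.random.rand(512, 512).astype(np.float32)
polar = cartesian_to_polar(frame)
print(polar.shape)   # (360, 256)
```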
4. arXiv:2411.19666 [pdf, other]
   Subjects: eess.IV (Image and Video Processing); cs.AI (Artificial Intelligence); cs.CV (Computer Vision and Pattern Recognition); cs.LG (Machine Learning); stat.AP (Applications)
   Title: Multimodal Whole Slide Foundation Model for Pathology
   Authors: Tong Ding, Sophia J. Wagner, Andrew H. Song, Richard J. Chen, Ming Y. Lu, Andrew Zhang, Anurag J. Vaidya, Guillaume Jaume, Muhammad Shaban, Ahrong Kim, Drew F. K. Williamson, Bowen Chen, Cristina Almagro-Perez, Paul Doucet, Sharifa Sahai, Chengkuan Chen, Daisuke Komura, Akihiro Kawabe, Shumpei Ishikawa, Georg Gerber, Tingying Peng, Long Phi Le, Faisal Mahmood
   Abstract: The field of computational pathology has been transformed with recent advances in foundation models that encode histopathology regions of interest (ROIs) into versatile and transferable feature representations via self-supervised learning (SSL). However, translating these advancements to address complex clinical challenges at the patient and slide level remains constrained by limited clinical data in disease-specific cohorts, especially for rare clinical conditions. We propose TITAN, a multimodal whole slide foundation model pretrained using 335,645 WSIs via visual self-supervised learning and vision-language alignment with corresponding pathology reports and 423,122 synthetic captions generated from a multimodal generative AI copilot for pathology. Without any finetuning or requiring clinical labels, TITAN can extract general-purpose slide representations and generate pathology reports that generalize to resource-limited clinical scenarios such as rare disease retrieval and cancer prognosis. We evaluate TITAN on diverse clinical tasks and find that TITAN outperforms both ROI and slide foundation models across machine learning settings such as linear probing, few-shot and zero-shot classification, rare cancer retrieval and cross-modal retrieval, and pathology report generation.
   Submitted 29 November, 2024; originally announced November 2024.
   Comments: The code is accessible at https://github.com/mahmoodlab/TITAN
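Among the evaluation settings listed for TITAN is linear probing on frozen slide embeddings. The snippet below is a minimal, hypothetical example of that protocol only: it assumes slide-level embeddings have already been extracted (random placeholders are used here) and is not taken from the TITAN codebase.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score

# Placeholder data standing in for precomputed slide-level embeddings and labels.
rng = np.random.default_rng(0)
X_train, y_train = rng.normal(size=(200, 768)), rng.integers(0, 2, 200)
X_test,  y_test  = rng.normal(size=(50, 768)),  rng.integers(0, 2, 50)

# Linear probe: a single logistic-regression layer trained on frozen features.
probe = LogisticRegression(max_iter=1000, C=1.0)
probe.fit(X_train, y_train)
print("balanced accuracy:", balanced_accuracy_score(y_test, probe.predict(X_test)))
```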
5. arXiv:2411.01918 [pdf, other]
   Subjects: eess.SY (Systems and Control)
   Title: Preemptive Holistic Collaborative System and Its Application in Road Transportation
   Authors: Yuan Li, Tao Li, Xiaoxue Xu, Xiang Dong, Yincai Cai, Ting Peng
   Abstract: Numerous real-world systems, including manufacturing processes, supply chains, and robotic systems, involve multiple independent entities with diverse objectives. The potential for conflicts arises from the inability of these entities to accurately predict and anticipate each other's actions. To address this challenge, we propose the Preemptive Holistic Collaborative System (PHCS) framework. By enabling information sharing and collaborative planning among independent entities, the PHCS facilitates the preemptive resolution of potential conflicts. We apply the PHCS framework to the specific context of road transportation, resulting in the Preemptive Holistic Collaborative Road Transportation System (PHCRTS). This system leverages shared driving intentions and pre-planned trajectories to optimize traffic flow and enhance safety. Simulation experiments in a two-lane merging scenario demonstrate the effectiveness of PHCRTS, reducing vehicle time delays by 90%, increasing traffic capacity by 300%, and eliminating accidents. The PHCS framework offers a promising approach to optimize the performance and safety of complex systems with multiple independent entities.
   Submitted 15 February, 2025; v1 submitted 4 November, 2024; originally announced November 2024.
6. arXiv:2410.06170 [pdf, other]
   Subjects: cs.LG (Machine Learning); eess.SY (Systems and Control)
   Title: QGym: Scalable Simulation and Benchmarking of Queuing Network Controllers
   Authors: Haozhe Chen, Ang Li, Ethan Che, Tianyi Peng, Jing Dong, Hongseok Namkoong
   Abstract: Queuing network control determines the allocation of scarce resources to manage congestion, a fundamental problem in manufacturing, communications, and healthcare. Compared to standard RL problems, queueing problems are distinguished by unique challenges: i) a system operating in continuous time, ii) high stochasticity, and iii) long horizons over which the system can become unstable (exploding delays). To spur methodological progress tackling these challenges, we present an open-sourced queueing simulation framework, QGym, that benchmarks queueing policies across realistic problem instances. Our modular framework allows researchers to build on our initial instances, which provide a wide range of environments including parallel servers, criss-cross, tandem, and re-entrant networks, as well as a realistically calibrated hospital queuing system. QGym makes it easy to compare multiple policies, including both model-free RL methods and classical queuing policies. Our testbed complements the traditional focus on evaluating algorithms based on mathematical guarantees in idealized settings, and significantly expands the scope of empirical benchmarking in prior work. QGym code is open-sourced at https://github.com/namkoong-lab/QGym.
   Submitted 8 October, 2024; originally announced October 2024.
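QGym benchmarks control policies on queueing networks operating in continuous time. Without relying on QGym's actual API, the sketch below shows the kind of event-driven loop such a benchmark rests on: two Poisson arrival streams share one exponential server, a routing policy picks which queue to serve, and the metric is the average waiting time. All rates and the example policy are assumptions for illustration.

```python
import heapq, random

def simulate(policy, horizon=10_000.0, arrival_rates=(0.4, 0.3), service_rate=1.0):
    """Event-driven simulation of two Poisson queues sharing one exponential server."""
    random.seed(0)
    t, queues, busy_until = 0.0, [[], []], 0.0
    events = [(random.expovariate(r), "arrival", i) for i, r in enumerate(arrival_rates)]
    heapq.heapify(events)
    waits = []
    while events and t < horizon:
        t, kind, i = heapq.heappop(events)
        if kind == "arrival":
            queues[i].append(t)                      # record arrival time
            heapq.heappush(events, (t + random.expovariate(arrival_rates[i]), "arrival", i))
        if t >= busy_until and any(queues):          # server idle: ask the policy what to serve
            j = policy(queues)
            waits.append(t - queues[j].pop(0))
            busy_until = t + random.expovariate(service_rate)
            heapq.heappush(events, (busy_until, "service_done", j))
    return sum(waits) / len(waits)

# A classical baseline policy: serve the longest queue first.
longest_queue_first = lambda qs: max(range(len(qs)), key=lambda k: len(qs[k]))
print("average wait:", simulate(longest_queue_first))
```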
7. arXiv:2409.08153 [pdf, other]
   Subjects: eess.AS (Audio and Speech Processing)
   Title: Dark Experience for Incremental Keyword Spotting
   Authors: Tianyi Peng, Yang Xiao
   Abstract: Spoken keyword spotting (KWS) is crucial for identifying keywords within audio inputs and is widely used in applications like Apple Siri and Google Home, particularly on edge devices. Current deep learning-based KWS systems, which are typically trained on a limited set of keywords, can suffer from performance degradation when encountering new domains, a challenge often addressed through few-shot fine-tuning. However, this adaptation frequently leads to catastrophic forgetting, where the model's performance on original data deteriorates. Progressive continual learning (CL) strategies have been proposed to overcome this, but they face limitations such as the need for task-ID information and increased storage, making them less practical for lightweight devices. To address these challenges, we introduce Dark Experience for Keyword Spotting (DE-KWS), a novel CL approach that leverages dark knowledge to distill past experiences throughout the training process. DE-KWS combines rehearsal and distillation, using both ground truth labels and logits stored in a memory buffer to maintain model performance across tasks. Evaluations on the Google Speech Command dataset show that DE-KWS outperforms existing CL baselines in average accuracy without increasing model size, offering an effective solution for resource-constrained edge devices. The scripts are available on GitHub for future research.
   Submitted 12 January, 2025; v1 submitted 12 September, 2024; originally announced September 2024.
   Comments: Accepted by ICASSP 2025
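DE-KWS is described as combining rehearsal with distillation, replaying both ground-truth labels and stored logits from a memory buffer. The fragment below sketches that kind of combined objective in the style of dark experience replay; the function name, loss weights, and buffer handling are my assumptions, not the released scripts.

```python
import torch
import torch.nn.functional as F

def de_kws_style_loss(model, batch, buffer, alpha=0.5, beta=0.5):
    """Cross-entropy on the current task plus replayed labels and logits.

    batch  = (x, y)                  current-task audio features and labels
    buffer = (x_b, y_b, logits_b)    examples and logits stored from past tasks
    """
    x, y = batch
    x_b, y_b, logits_b = buffer

    loss = F.cross_entropy(model(x), y)              # learn the new keywords
    out_b = model(x_b)
    loss += alpha * F.mse_loss(out_b, logits_b)      # distill stored "dark" logits
    loss += beta * F.cross_entropy(out_b, y_b)       # rehearse stored ground-truth labels
    return loss

# Toy usage with a linear classifier standing in for a KWS model.
model  = torch.nn.Linear(40, 10)
batch  = (torch.randn(8, 40), torch.randint(0, 10, (8,)))
buffer = (torch.randn(8, 40), torch.randint(0, 10, (8,)), torch.randn(8, 10))
de_kws_style_loss(model, batch, buffer).backward()
```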
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ICASSP 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.08121">arXiv:2408.08121</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.08121">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ACCESS.2025.3539370">10.1109/ACCESS.2025.3539370 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Enhancing Expressway Ramp Merge Safety and Efficiency via Spatiotemporal Cooperative Control </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Ting Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+X">Xiaoxue Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Y">Yuan Li</a>, <a href="/search/eess?searchtype=author&amp;query=WU%2C+J">Jie WU</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+T">Tao Li</a>, <a href="/search/eess?searchtype=author&amp;query=Dong%2C+X">Xiang Dong</a>, <a href="/search/eess?searchtype=author&amp;query=Cai%2C+Y">Yincai Cai</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+P">Peng Wu</a>, <a href="/search/eess?searchtype=author&amp;query=Ullah%2C+S">Sana Ullah</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.08121v3-abstract-short" style="display: inline;"> In the context of autonomous driving on expressways, the issue of ensuring safe and efficient ramp merging remains a significant challenge. Existing systems often struggle to accurately assess the status and intentions of other vehicles, leading to a persistent occurrence of accidents despite efforts to maintain safe distances. This study proposes a novel spatiotemporal cooperative control approac&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.08121v3-abstract-full').style.display = 'inline'; document.getElementById('2408.08121v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.08121v3-abstract-full" style="display: none;"> In the context of autonomous driving on expressways, the issue of ensuring safe and efficient ramp merging remains a significant challenge. Existing systems often struggle to accurately assess the status and intentions of other vehicles, leading to a persistent occurrence of accidents despite efforts to maintain safe distances. This study proposes a novel spatiotemporal cooperative control approach integrating vehicle-road coordination to address this critical issue. A comprehensive methodology is developed, beginning with the calculation of safe distances under varying spatiotemporal conditions. This involves considering multiple factors, including vehicle speed differentials, positioning errors, and clock synchronization errors. 
9. arXiv:2408.05151 [pdf]
   Subjects: cs.LG (Machine Learning); cs.AI (Artificial Intelligence); eess.SP (Signal Processing)
   Title: Meta-Learning Guided Label Noise Distillation for Robust Signal Modulation Classification
   Authors: Xiaoyang Hao, Zhixi Feng, Tongqing Peng, Shuyuan Yang
   Abstract: Automatic modulation classification (AMC) is an effective way to deal with physical-layer threats to the internet of things (IoT). However, label noise often occurs in practice, which significantly impacts the performance and robustness of deep neural networks (DNNs). In this paper, we propose a meta-learning guided label noise distillation method for robust AMC. Specifically, a teacher-student heterogeneous network (TSHN) framework is proposed to distill and reuse label noise. Based on the idea that labels are representations, the teacher network with trusted meta-learning divides and conquers untrusted label samples and then guides the student network to learn better by reassessing and correcting labels. Furthermore, we propose a multi-view signal (MVS) method to further improve the performance of hard-to-classify categories with few-shot trusted label samples. Extensive experimental results show that our methods can significantly improve the performance and robustness of signal AMC in various and complex label noise scenarios, which is crucial for securing IoT applications.
   Submitted 9 August, 2024; originally announced August 2024.
   Comments: 8 pages, 7 figures
   ACM Class: I.2; C.2
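The TSHN idea above has a teacher that separates trusted from untrusted labels and then corrects the untrusted ones before guiding the student. The snippet below illustrates that divide-and-correct step with a common small-loss heuristic and teacher pseudo-labels; the function name, threshold, and toy model are assumptions and do not reproduce the paper's exact procedure.

```python
import torch
import torch.nn.functional as F

def split_and_relabel(teacher, x, y_noisy, trust_quantile=0.5):
    """Split a batch into trusted/untrusted samples and relabel the untrusted ones."""
    with torch.no_grad():
        logits = teacher(x)
        losses = F.cross_entropy(logits, y_noisy, reduction="none")
        threshold = losses.quantile(trust_quantile)        # small-loss samples are trusted
        trusted = losses <= threshold
        y_corrected = torch.where(trusted, y_noisy, logits.argmax(dim=1))
    return trusted, y_corrected

# Toy usage: a linear "teacher" over 64-dim signal features, 11 modulation classes.
teacher = torch.nn.Linear(64, 11)
x, y_noisy = torch.randn(32, 64), torch.randint(0, 11, (32,))
trusted, y_corrected = split_and_relabel(teacher, x, y_noisy)
print(trusted.sum().item(), "trusted of", len(y_noisy))
```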
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 7 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2; C.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.19867">arXiv:2407.19867</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.19867">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Design and Testing for Steel Support Axial Force Servo System </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Ullah%2C+S">Sana Ullah</a>, <a href="/search/eess?searchtype=author&amp;query=Zhou%2C+Y">Yonghong Zhou</a>, <a href="/search/eess?searchtype=author&amp;query=Lai%2C+M">Maokai Lai</a>, <a href="/search/eess?searchtype=author&amp;query=Dong%2C+X">Xiang Dong</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+T">Tao Li</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+X">Xiaoxue Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Y">Yuan Li</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Ting Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.19867v1-abstract-short" style="display: inline;"> Foundation excavations are deepening, expanding, and approaching structures. Steel supports measure and manage axial force. The study regulates steel support structure power during deep excavation using a novel axial force management system for safety, efficiency, and structural integrity. Closed-loop control changes actuator output to maintain axial force based on force. In deep excavation, the s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.19867v1-abstract-full').style.display = 'inline'; document.getElementById('2407.19867v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.19867v1-abstract-full" style="display: none;"> Foundation excavations are deepening, expanding, and approaching structures. Steel supports measure and manage axial force. The study regulates steel support structure power during deep excavation using a novel axial force management system for safety, efficiency, and structural integrity. Closed-loop control changes actuator output to maintain axial force based on force. In deep excavation, the servo system regulates unstable soil, side pressure, and structural demands. Modern engineering and tech are used. Temperature changes automatically adjust the jack to maintain axial force. Includes hydraulic jacks, triple-acting cylinders, temperature, and deformation sensors, and automatic control. Foundation pit excavation is dynamic, yet structure tension is constant. There is no scientific way to regulate axial force foundation pit excavation. The revolutionary Servo system adjusts temperature, compression, and axial force to deform pits. System control requires foundation pit direction detection and modification. 
11. arXiv:2407.06612 [pdf]
   Subjects: eess.IV (Image and Video Processing); cs.CV (Computer Vision and Pattern Recognition); cs.LG (Machine Learning)
   Title: AI-based Automatic Segmentation of Prostate on Multi-modality Images: A Review
   Authors: Rui Jin, Derun Li, Dehui Xiang, Lei Zhang, Hailing Zhou, Fei Shi, Weifang Zhu, Jing Cai, Tao Peng, Xinjian Chen
   Abstract: Prostate cancer represents a major threat to health. Early detection is vital in reducing the mortality rate among prostate cancer patients. One approach involves using multi-modality (CT, MRI, US, etc.) computer-aided diagnosis (CAD) systems for the prostate region. However, prostate segmentation is challenging due to imperfections in the images and the prostate's complex tissue structure. The advent of precision medicine and a significant increase in clinical capacity have spurred the need for various data-driven tasks in the field of medical imaging. Recently, numerous machine learning and data mining tools have been integrated into various medical areas, including image segmentation. This article proposes a new classification method that differentiates supervision types, either in number or kind, during the training phase. Subsequently, we conducted a survey on artificial intelligence (AI)-based automatic prostate segmentation methods, examining the advantages and limitations of each. Additionally, we introduce variants of evaluation metrics for the verification and performance assessment of the segmentation method and summarize the current challenges. Finally, future research directions and development trends are discussed, reflecting the outcomes of our literature survey, suggesting high-precision detection and treatment of prostate cancer as a promising avenue.
   Submitted 9 July, 2024; originally announced July 2024.
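The review above discusses evaluation metrics for segmentation. The Dice Similarity Coefficient cited elsewhere in this listing (e.g., the IVUS entry's DSC of 0.706) is computed as 2|A∩B| / (|A| + |B|); the short example below shows the standard calculation on dummy masks (the masks themselves are placeholders).

```python
import numpy as np

def dice(pred, target, eps=1e-7):
    """Dice Similarity Coefficient between two binary masks: 2|A∩B| / (|A| + |B|)."""
    pred, target = pred.astype(bool), target.astype(bool)
    inter = np.logical_and(pred, target).sum()
    return (2.0 * inter + eps) / (pred.sum() + target.sum() + eps)

# Two overlapping square masks as a stand-in for prediction and ground truth.
a = np.zeros((64, 64), dtype=np.uint8); a[10:40, 10:40] = 1
b = np.zeros((64, 64), dtype=np.uint8); b[15:45, 15:45] = 1
print(round(float(dice(a, b)), 3))
```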
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.03671">arXiv:2407.03671</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.03671">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Spatio-temporal cooperative control Method of Highway Ramp Merge Based on Vehicle-road Coordination </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Xu%2C+X">Xiaoxue Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Lai%2C+M">Maokai Lai</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+H">Haitao Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Dong%2C+X">Xiang Dong</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+T">Tao Li</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+J">Jie Wu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Y">Yuan Li</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Ting Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.03671v2-abstract-short" style="display: inline;"> The merging area of highway ramps faces multiple challenges, including traffic congestion, collision risks, speed mismatches, driver behavior uncertainties, limited visibility, and bottleneck effects. However, autonomous vehicles engaging in deep coordination between vehicle and road in merging zones, by pre-planning and uploading travel trajectories, can significantly enhance the safety and effi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03671v2-abstract-full').style.display = 'inline'; document.getElementById('2407.03671v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.03671v2-abstract-full" style="display: none;"> The merging area of highway ramps faces multiple challenges, including traffic congestion, collision risks, speed mismatches, driver behavior uncertainties, limited visibility, and bottleneck effects. However, autonomous vehicles engaging in deep coordination between vehicle and road in merging zones, by pre-planning and uploading travel trajectories, can significantly enhance the safety and efficiency of merging zones. In this paper, we mainly introduce a mainline priority cooperation method to achieve time and space cooperative control of highway merging. Vehicle-mounted intelligent units share real-time vehicle status and driving intentions with Road Section Management Units, which pre-plan the spatiotemporal trajectories of vehicle travel. After receiving these trajectories, Vehicle Intelligent Units strictly adhere to them. Through this deep collaboration between vehicles and roads, conflicts in time and space during vehicle travel are eliminated in advance.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03671v2-abstract-full').style.display = 'none'; document.getElementById('2407.03671v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.08621">arXiv:2405.08621</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.08621">pdf</a>, <a href="https://arxiv.org/format/2405.08621">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> RMT-BVQA: Recurrent Memory Transformer-based Blind Video Quality Assessment for Enhanced Video Content </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tianhao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+C">Chen Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Danier%2C+D">Duolikun Danier</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+F">Fan Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Vallade%2C+B">Benoit Vallade</a>, <a href="/search/eess?searchtype=author&amp;query=Mackin%2C+A">Alex Mackin</a>, <a href="/search/eess?searchtype=author&amp;query=Bull%2C+D">David Bull</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.08621v5-abstract-short" style="display: inline;"> With recent advances in deep learning, numerous algorithms have been developed to enhance video quality, reduce visual artifacts, and improve perceptual quality. However, little research has been reported on the quality assessment of enhanced content - the evaluation of enhancement methods is often based on quality metrics that were designed for compression applications. In this paper, we propose&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.08621v5-abstract-full').style.display = 'inline'; document.getElementById('2405.08621v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.08621v5-abstract-full" style="display: none;"> With recent advances in deep learning, numerous algorithms have been developed to enhance video quality, reduce visual artifacts, and improve perceptual quality. However, little research has been reported on the quality assessment of enhanced content - the evaluation of enhancement methods is often based on quality metrics that were designed for compression applications. In this paper, we propose a novel blind deep video quality assessment (VQA) method specifically for enhanced video content. 
It employs a new Recurrent Memory Transformer (RMT) based network architecture to obtain video quality representations, which is optimized through a novel content-quality-aware contrastive learning strategy based on a new database containing 13K training patches with enhanced content. The extracted quality representations are then combined through linear regression to generate video-level quality indices. The proposed method, RMT-BVQA, has been evaluated on the VDPVE (VQA Dataset for Perceptual Video Enhancement) database through a five-fold cross validation. The results show its superior correlation performance when compared to ten existing no-reference quality metrics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.08621v5-abstract-full').style.display = 'none'; document.getElementById('2405.08621v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper has been accepted by the ECCV 2024 AIM Advances in Image Manipulation workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.19763">arXiv:2403.19763</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.19763">pdf</a>, <a href="https://arxiv.org/format/2403.19763">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Creating Aesthetic Sonifications on the Web with SIREN </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tristan Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Choi%2C+H">Hongchan Choi</a>, <a href="/search/eess?searchtype=author&amp;query=Berger%2C+J">Jonathan Berger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.19763v1-abstract-short" style="display: inline;"> SIREN is a flexible, extensible, and customizable web-based general-purpose interface for auditory data display (sonification). Designed as a digital audio workstation for sonification, synthesizers written in JavaScript using the Web Audio API facilitate intuitive mapping of data to auditory parameters for a wide range of purposes. 
This paper explores the breadth of sound synthesis techniques s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19763v1-abstract-full').style.display = 'inline'; document.getElementById('2403.19763v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.19763v1-abstract-full" style="display: none;"> SIREN is a flexible, extensible, and customizable web-based general-purpose interface for auditory data display (sonification). Designed as a digital audio workstation for sonification, synthesizers written in JavaScript using the Web Audio API facilitate intuitive mapping of data to auditory parameters for a wide range of purposes. This paper explores the breadth of sound synthesis techniques supported by SIREN, and details the structure and definition of a SIREN synthesizer module. The paper proposes further development that will increase SIREN&#39;s utility. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19763v1-abstract-full').style.display = 'none'; document.getElementById('2403.19763v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 1 figure, 5 listings, submitted to the Web Audio Conference 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.02605">arXiv:2312.02605</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.02605">pdf</a>, <a href="https://arxiv.org/format/2312.02605">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/PCS60826.2024.10566283">10.1109/PCS60826.2024.10566283 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Accelerating Learnt Video Codecs with Gradient Decay and Layer-wise Distillation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tianhao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Gao%2C+G">Ge Gao</a>, <a href="/search/eess?searchtype=author&amp;query=Sun%2C+H">Heming Sun</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+F">Fan Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Bull%2C+D">David Bull</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.02605v1-abstract-short" style="display: inline;"> In recent years, end-to-end learnt video codecs have demonstrated their potential to compete with conventional coding algorithms in term of 
compression efficiency. However, most learning-based video compression models are associated with high computational complexity and latency, in particular at the decoder side, which limits their deployment in practical applications. In this paper, we present a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02605v1-abstract-full').style.display = 'inline'; document.getElementById('2312.02605v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.02605v1-abstract-full" style="display: none;"> In recent years, end-to-end learnt video codecs have demonstrated their potential to compete with conventional coding algorithms in terms of compression efficiency. However, most learning-based video compression models are associated with high computational complexity and latency, in particular at the decoder side, which limits their deployment in practical applications. In this paper, we present a novel model-agnostic pruning scheme based on gradient decay and adaptive layer-wise distillation. Gradient decay enhances parameter exploration during sparsification whilst preventing runaway sparsity and is superior to the standard Straight-Through Estimation. The adaptive layer-wise distillation regulates the sparse training in various stages based on the distortion of intermediate features. This stage-wise design efficiently updates parameters with minimal computational overhead. The proposed approach has been applied to three popular end-to-end learnt video codecs, FVC, DCVC, and DCVC-HEM. Results confirm that our method yields up to 65% reduction in MACs and 2x speed-up with less than 0.3 dB drop in BD-PSNR. Supporting code and supplementary material can be downloaded from: https://jasminepp.github.io/lightweightdvc/ <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02605v1-abstract-full').style.display = 'none'; document.getElementById('2312.02605v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023.
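<p class="is-size-7">The abstract above contrasts its gradient-decay scheme with the standard Straight-Through Estimation (STE) used during sparsification. As background only, here is a minimal PyTorch-style sketch of STE-based magnitude pruning, the baseline the paper claims to improve on; the class and helper names are illustrative assumptions, and the paper's own gradient-decay rule is not reproduced here.</p> <pre><code>
import torch

class STEMask(torch.autograd.Function):
    """Straight-Through Estimator: masked forward pass, identity backward pass."""

    @staticmethod
    def forward(ctx, weight, mask):
        return weight * mask

    @staticmethod
    def backward(ctx, grad_output):
        # Gradients flow to every weight, pruned or not, as if masking were the identity.
        return grad_output, None

def magnitude_mask(weight, sparsity=0.5):
    """Binary mask that keeps the largest-magnitude weights."""
    k = max(1, int(weight.numel() * (1.0 - sparsity)))
    keep = torch.topk(weight.abs().flatten(), k).indices
    mask = torch.zeros(weight.numel(), device=weight.device)
    mask[keep] = 1.0
    return mask.view_as(weight)

# Toy usage: dense gradients despite a sparse forward pass
w = torch.randn(64, 64, requires_grad=True)
sparse_w = STEMask.apply(w, magnitude_mask(w.detach()))
sparse_w.pow(2).sum().backward()
</code></pre> <p class="is-size-7">In this baseline the backward pass treats the mask as the identity; per the abstract, the paper replaces that identity treatment with a gradient-decay rule that balances parameter exploration against runaway sparsity.</p>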
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> 2312.02605 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.12461">arXiv:2311.12461</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.12461">pdf</a>, <a href="https://arxiv.org/format/2311.12461">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> HiFi-Syn: Hierarchical Granularity Discrimination for High-Fidelity Synthesis of MR Images with Structure Preservation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Yu%2C+Z">Ziqi Yu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+B">Botao Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+S">Shengjie Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+X">Xiang Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+J">Jianfeng Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+X">Xiao-Yong Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.12461v2-abstract-short" style="display: inline;"> Synthesizing medical images while preserving their structural information is crucial in medical research. In such scenarios, the preservation of anatomical content becomes especially important. Although recent advances have been made by incorporating instance-level information to guide translation, these methods overlook the spatial coherence of structural-level representation and the anatomical i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.12461v2-abstract-full').style.display = 'inline'; document.getElementById('2311.12461v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.12461v2-abstract-full" style="display: none;"> Synthesizing medical images while preserving their structural information is crucial in medical research. In such scenarios, the preservation of anatomical content becomes especially important. Although recent advances have been made by incorporating instance-level information to guide translation, these methods overlook the spatial coherence of structural-level representation and the anatomical invariance of content during translation. To address these issues, we introduce hierarchical granularity discrimination, which exploits various levels of semantic information present in medical images. Our strategy utilizes three levels of discrimination granularity: pixel-level discrimination using a Brain Memory Bank, structure-level discrimination on each brain structure with a re-weighting strategy to focus on hard samples, and global-level discrimination to ensure anatomical consistency during translation. 
The image translation performance of our strategy has been evaluated on three independent datasets (UK Biobank, IXI, and BraTS 2018), and it has outperformed state-of-the-art algorithms. Particularly, our model excels not only in synthesizing normal structures but also in handling abnormal (pathological) structures, such as brain tumors, despite the variations in contrast observed across different imaging modalities due to their pathological characteristics. The diagnostic value of synthesized MR images containing brain tumors has been evaluated by radiologists. This indicates that our model may offer an alternative solution in scenarios where specific MR modalities of patients are unavailable. Extensive experiments further demonstrate the versatility of our method, providing unique insights into medical image translation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.12461v2-abstract-full').style.display = 'none'; document.getElementById('2311.12461v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.02097">arXiv:2310.02097</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.02097">pdf</a>, <a href="https://arxiv.org/format/2310.02097">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Leveraging Classic Deconvolution and Feature Extraction in Zero-Shot Image Restoration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Chobola%2C+T">Tomáš Chobola</a>, <a href="/search/eess?searchtype=author&amp;query=M%C3%BCller%2C+G">Gesine Müller</a>, <a href="/search/eess?searchtype=author&amp;query=Dausmann%2C+V">Veit Dausmann</a>, <a href="/search/eess?searchtype=author&amp;query=Theileis%2C+A">Anton Theileis</a>, <a href="/search/eess?searchtype=author&amp;query=Taucher%2C+J">Jan Taucher</a>, <a href="/search/eess?searchtype=author&amp;query=Huisken%2C+J">Jan Huisken</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.02097v1-abstract-short" style="display: inline;"> Non-blind deconvolution aims to restore a sharp image from its blurred counterpart given an obtained kernel. Existing deep neural architectures are often built based on large datasets of sharp ground truth images and trained with supervision. Sharp, high quality ground truth images, however, are not always available, especially for biomedical applications.
This severely hampers the applicability o&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.02097v1-abstract-full').style.display = 'inline'; document.getElementById('2310.02097v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.02097v1-abstract-full" style="display: none;"> Non-blind deconvolution aims to restore a sharp image from its blurred counterpart given an obtained kernel. Existing deep neural architectures are often built based on large datasets of sharp ground truth images and trained with supervision. Sharp, high quality ground truth images, however, are not always available, especially for biomedical applications. This severely hampers the applicability of current approaches in practice. In this paper, we propose a novel non-blind deconvolution method that leverages the power of deep learning and classic iterative deconvolution algorithms. Our approach combines a pre-trained network to extract deep features from the input image with iterative Richardson-Lucy deconvolution steps. Subsequently, a zero-shot optimisation process is employed to integrate the deconvolved features, resulting in a high-quality reconstructed image. By performing the preliminary reconstruction with the classic iterative deconvolution method, we can effectively utilise a smaller network to produce the final image, thus accelerating the reconstruction whilst reducing the demand for valuable computational resources. Our method demonstrates significant improvements in various real-world non-blind deconvolution tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.02097v1-abstract-full').style.display = 'none'; document.getElementById('2310.02097v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023.
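<p class="is-size-7">The entry above couples a pre-trained feature extractor with classic iterative Richardson-Lucy (RL) deconvolution. For reference, a minimal NumPy sketch of the standard RL update is given below (the classic component only, not the authors' zero-shot pipeline); it assumes a known, centred blur kernel, a non-negative float image, and FFT-based circular convolution, and all function names are illustrative.</p> <pre><code>
import numpy as np
from numpy.fft import fft2, ifft2

def fft_convolve(image, kernel):
    """Circular convolution with a small kernel, via zero-padding and FFT."""
    padded = np.zeros_like(image)
    kh, kw = kernel.shape
    padded[:kh, :kw] = kernel
    padded = np.roll(padded, (-(kh // 2), -(kw // 2)), axis=(0, 1))  # centre kernel at origin
    return np.real(ifft2(fft2(image) * fft2(padded)))

def richardson_lucy(blurred, kernel, n_iter=30, eps=1e-12):
    """Classic RL update: estimate *= K_flipped conv (blurred / (K conv estimate))."""
    estimate = np.full_like(blurred, 0.5)
    kernel_flipped = kernel[::-1, ::-1]
    for _ in range(n_iter):
        reblurred = fft_convolve(estimate, kernel) + eps
        estimate = estimate * fft_convolve(blurred / reblurred, kernel_flipped)
    return estimate
</code></pre>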
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.01865">arXiv:2309.01865</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.01865">pdf</a>, <a href="https://arxiv.org/format/2309.01865">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> BigFUSE: Global Context-Aware Image Fusion in Dual-View Light-Sheet Fluorescence Microscopy with Image Formation Prior </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Y">Yu Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Muller%2C+G">Gesine Muller</a>, <a href="/search/eess?searchtype=author&amp;query=Navab%2C+N">Nassir Navab</a>, <a href="/search/eess?searchtype=author&amp;query=Marr%2C+C">Carsten Marr</a>, <a href="/search/eess?searchtype=author&amp;query=Huisken%2C+J">Jan Huisken</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.01865v2-abstract-short" style="display: inline;"> Light-sheet fluorescence microscopy (LSFM), a planar illumination technique that enables high-resolution imaging of samples, experiences defocused image quality caused by light scattering when photons propagate through thick tissues. To circumvent this issue, dualview imaging is helpful. It allows various sections of the specimen to be scanned ideally by viewing the sample from opposing orientatio&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.01865v2-abstract-full').style.display = 'inline'; document.getElementById('2309.01865v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.01865v2-abstract-full" style="display: none;"> Light-sheet fluorescence microscopy (LSFM), a planar illumination technique that enables high-resolution imaging of samples, experiences defocused image quality caused by light scattering when photons propagate through thick tissues. To circumvent this issue, dualview imaging is helpful. It allows various sections of the specimen to be scanned ideally by viewing the sample from opposing orientations. Recent image fusion approaches can then be applied to determine in-focus pixels by comparing image qualities of two views locally and thus yield spatially inconsistent focus measures due to their limited field-of-view. Here, we propose BigFUSE, a global context-aware image fuser that stabilizes image fusion in LSFM by considering the global impact of photon propagation in the specimen while determining focus-defocus based on local image qualities. Inspired by the image formation prior in dual-view LSFM, image fusion is considered as estimating a focus-defocus boundary using Bayes Theorem, where (i) the effect of light scattering onto focus measures is included within Likelihood; and (ii) the spatial consistency regarding focus-defocus is imposed in Prior. The expectation-maximum algorithm is then adopted to estimate the focus-defocus boundary. 
Competitive experimental results show that BigFUSE is the first dual-view LSFM fuser that is able to exclude structured artifacts when fusing information, highlighting its abilities of automatic image fusion. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.01865v2-abstract-full').style.display = 'none'; document.getElementById('2309.01865v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">paper in MICCAI 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.07708">arXiv:2308.07708</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.07708">pdf</a>, <a href="https://arxiv.org/ps/2308.07708">ps</a>, <a href="https://arxiv.org/format/2308.07708">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> A Real-time Non-contact Localization Method for Faulty Electric Energy Storage Components using Highly Sensitive Magnetometers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tonghui Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Gao%2C+W">Wei Gao</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+Y">Ya Wu</a>, <a href="/search/eess?searchtype=author&amp;query=Ma%2C+Y">Yulong Ma</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+S">Shiwu Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Hu%2C+Y">Yinan Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.07708v1-abstract-short" style="display: inline;"> With the wide application of electric energy storage component arrays, such as battery arrays, capacitor arrays, inductor arrays, their potential safety risks have gradually drawn the public attention. However, existing technologies cannot meet the needs of non-contact and real-time diagnosis for faulty components inside these massive arrays. To solve this problem, this paper proposes a new method&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.07708v1-abstract-full').style.display = 'inline'; document.getElementById('2308.07708v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.07708v1-abstract-full" style="display: none;"> With the wide application of electric energy storage component arrays, such as battery arrays, capacitor arrays, inductor arrays, their potential safety risks have gradually drawn the public attention. 
However, existing technologies cannot meet the needs of non-contact and real-time diagnosis for faulty components inside these massive arrays. To solve this problem, this paper proposes a new method based on the beamforming spatial filtering algorithm to precisely locate the faulty components within the arrays in real-time. The method uses highly sensitive magnetometers to collect the magnetic signals from energy storage component arrays, without damaging or even contacting any component. The experimental results demonstrate the potential of the proposed method in securing energy storage component arrays. Within an imaging area of 80 mm $\times$ 80 mm, the one faulty component out of nine total components can be localized with an accuracy of 0.72 mm for capacitor arrays and 1.60 mm for battery arrays. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.07708v1-abstract-full').style.display = 'none'; document.getElementById('2308.07708v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.01825">arXiv:2212.01825</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2212.01825">pdf</a>, <a href="https://arxiv.org/format/2212.01825">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TMI.2022.3225528">10.1109/TMI.2022.3225528 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> MouseGAN++: Unsupervised Disentanglement and Contrastive Representation for Multiple MRI Modalities Synthesis and Structural Segmentation of Mouse Brain </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Yu%2C+Z">Ziqi Yu</a>, <a href="/search/eess?searchtype=author&amp;query=Han%2C+X">Xiaoyang Han</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+S">Shengjie Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+J">Jianfeng Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+X">Xiao-Yong Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.01825v1-abstract-short" style="display: inline;"> Segmenting the fine structure of the mouse brain on magnetic resonance (MR) images is critical for delineating morphological regions, analyzing brain function, and understanding their relationships. Compared to a single MRI modality, multimodal MRI data provide complementary tissue features that can be exploited by deep learning models, resulting in better segmentation results. 
However, multimodal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.01825v1-abstract-full').style.display = 'inline'; document.getElementById('2212.01825v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.01825v1-abstract-full" style="display: none;"> Segmenting the fine structure of the mouse brain on magnetic resonance (MR) images is critical for delineating morphological regions, analyzing brain function, and understanding their relationships. Compared to a single MRI modality, multimodal MRI data provide complementary tissue features that can be exploited by deep learning models, resulting in better segmentation results. However, multimodal mouse brain MRI data is often lacking, making automatic segmentation of mouse brain fine structure a very challenging task. To address this issue, it is necessary to fuse multimodal MRI data to produce distinguished contrasts in different brain structures. Hence, we propose a novel disentangled and contrastive GAN-based framework, named MouseGAN++, to synthesize multiple MR modalities from single ones in a structure-preserving manner, thus improving the segmentation performance by imputing missing modalities and multi-modality fusion. Our results demonstrate that the translation performance of our method outperforms the state-of-the-art methods. Using the subsequently learned modality-invariant information as well as the modality-translated images, MouseGAN++ can segment fine brain structures with averaged dice coefficients of 90.0% (T2w) and 87.9% (T1w), respectively, achieving around +10% performance improvement compared to the state-of-the-art algorithms. Our results demonstrate that MouseGAN++, as a simultaneous image synthesis and segmentation method, can be used to fuse cross-modality information in an unpaired manner and yield more robust performance in the absence of multimodal data. We release our method as a mouse brain structural segmentation tool for free academic usage at https://github.com/yu02019. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.01825v1-abstract-full').style.display = 'none'; document.getElementById('2212.01825v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. 
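<p class="is-size-7">MouseGAN++ above relies on contrastive representation learning across MRI modalities. As a generic illustration of that family of objectives (not the paper's exact loss), the sketch below computes an InfoNCE-style loss in which embeddings of the same slice under two modalities form positive pairs and the rest of the batch serves as negatives; all names and shapes are assumptions for the example.</p> <pre><code>
import torch
import torch.nn.functional as F

def info_nce(z_a, z_b, temperature=0.1):
    """Generic InfoNCE loss: row i of z_a and row i of z_b are a positive pair,
    every other row in the batch acts as a negative."""
    z_a = F.normalize(z_a, dim=1)
    z_b = F.normalize(z_b, dim=1)
    logits = z_a @ z_b.t() / temperature            # (N, N) cosine similarities
    targets = torch.arange(z_a.size(0), device=z_a.device)
    return F.cross_entropy(logits, targets)

# Toy usage: embeddings of the same slice seen under two MRI modalities
z_t1, z_t2 = torch.randn(8, 128), torch.randn(8, 128)
loss = info_nce(z_t1, z_t2)
</code></pre>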
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IEEE Transactions on Medical Imaging (IEEE-TMI) 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.15377">arXiv:2209.15377</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.15377">pdf</a>, <a href="https://arxiv.org/format/2209.15377">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DELAD: Deep Landweber-guided deconvolution with Hessian and sparse prior </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Chobola%2C+T">Tomas Chobola</a>, <a href="/search/eess?searchtype=author&amp;query=Theileis%2C+A">Anton Theileis</a>, <a href="/search/eess?searchtype=author&amp;query=Taucher%2C+J">Jan Taucher</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.15377v1-abstract-short" style="display: inline;"> We present a model for non-blind image deconvolution that incorporates the classic iterative method into a deep learning application. Instead of using large over-parameterised generative networks to create sharp picture representations, we build our network based on the iterative Landweber deconvolution algorithm, which is integrated with trainable convolutional layers to enhance the recovered ima&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.15377v1-abstract-full').style.display = 'inline'; document.getElementById('2209.15377v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.15377v1-abstract-full" style="display: none;"> We present a model for non-blind image deconvolution that incorporates the classic iterative method into a deep learning application. Instead of using large over-parameterised generative networks to create sharp picture representations, we build our network based on the iterative Landweber deconvolution algorithm, which is integrated with trainable convolutional layers to enhance the recovered image structures and details. Additional to the data fidelity term, we also add Hessian and sparse constraints as regularization terms to improve the image reconstruction quality. Our proposed model is \textit{self-supervised} and converges to a solution based purely on the input blurred image and respective blur kernel without the requirement of any pre-training. We evaluate our technique using standard computer vision benchmarking datasets as well as real microscope images obtained by our enhanced depth-of-field (EDOF) underwater microscope, demonstrating the capabilities of our model in a real-world application. 
The quantitative results demonstrate that our approach is competitive with state-of-the-art non-blind image deblurring methods despite having a fraction of the parameters and not being pre-trained, demonstrating the efficiency and efficacy of embedding a classic deconvolution approach inside a deep network. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.15377v1-abstract-full').style.display = 'none'; document.getElementById('2209.15377v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.15012">arXiv:2209.15012</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.15012">pdf</a>, <a href="https://arxiv.org/format/2209.15012">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1364/OE.478695">10.1364/OE.478695 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Ghost translation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Ren%2C+W">Wenhan Ren</a>, <a href="/search/eess?searchtype=author&amp;query=Nie%2C+X">Xiaoyu Nie</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Scully%2C+M+O">Marlan O. Scully</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.15012v1-abstract-short" style="display: inline;"> Artificial intelligence has recently been widely used in computational imaging. The deep neural network (DNN) improves the signal-to-noise ratio of the retrieved images, whose quality is otherwise corrupted due to the low sampling ratio or noisy environments. This work proposes a new computational imaging scheme based on the sequence transduction mechanism with the transformer network. The simulat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.15012v1-abstract-full').style.display = 'inline'; document.getElementById('2209.15012v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.15012v1-abstract-full" style="display: none;"> Artificial intelligence has recently been widely used in computational imaging. The deep neural network (DNN) improves the signal-to-noise ratio of the retrieved images, whose quality is otherwise corrupted due to the low sampling ratio or noisy environments. 
This work proposes a new computational imaging scheme based on the sequence transduction mechanism with the transformer network. The simulation database assists the network in achieving signal translation ability. The experimental single-pixel detector&#39;s signal will be `translated&#39; into a 2D image in an end-to-end manner. High-quality images with no background noise can be retrieved at a sampling ratio as low as 2%. The illumination patterns can be either well-designed speckle patterns for sub-Nyquist imaging or random speckle patterns. Moreover, our method is robust to noise interference. This translation mechanism opens a new direction for DNN-assisted ghost imaging and can be used in various computational imaging scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.15012v1-abstract-full').style.display = 'none'; document.getElementById('2209.15012v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.10669">arXiv:2207.10669</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.10669">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> Retinex-qDPC: automatic background rectified quantitative differential phase contrast imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+S">Shuhe Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Ke%2C+Z">Zeyu Ke</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+H">Han Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Berendschot%2C+T+T+J+M">Tos T. J. M. Berendschot</a>, <a href="/search/eess?searchtype=author&amp;query=Zhou%2C+J">Jinhua Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.10669v1-abstract-short" style="display: inline;"> The quality of quantitative differential phase contrast reconstruction (qDPC) can be severely degenerated by the mismatch of the background of two oblique illuminated images, yielding problematic phase recovery results. These background mismatches may result from illumination patterns, inhomogeneous media distribution, or other defocusing layers. 
In previous reports, the background is manually cal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.10669v1-abstract-full').style.display = 'inline'; document.getElementById('2207.10669v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.10669v1-abstract-full" style="display: none;"> The quality of quantitative differential phase contrast reconstruction (qDPC) can be severely degenerated by the mismatch of the background of two oblique illuminated images, yielding problematic phase recovery results. These background mismatches may result from illumination patterns, inhomogeneous media distribution, or other defocusing layers. In previous reports, the background is manually calibrated, which is time-consuming and unstable, since new calibrations are needed if any modification is made to the optical system. It is also impossible to calibrate the background from the defocusing layers, or for highly dynamic observation, as the background changes over time. To tackle the background mismatch and increase the experimental robustness, we propose the Retinex-qDPC, in which we use the image's edge features as the data fidelity term, yielding L2-Retinex-qDPC and L1-Retinex-qDPC for high background-robustness qDPC reconstruction. The split Bregman method is used to solve the L1-Retinex DPC. We compare both Retinex-qDPC models against state-of-the-art DPC reconstruction algorithms, including total-variation regularized qDPC and isotropic-qDPC, using both simulated and experimental data. Results show that the Retinex-qDPC can significantly improve the phase recovery quality by suppressing the impact of mismatched backgrounds. Among them, the L1-Retinex-qDPC is better than L2-Retinex-qDPC and other state-of-the-art DPC algorithms. In general, the Retinex-qDPC increases the experimental robustness against background illumination without any modification of the optical system, which will benefit all qDPC applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.10669v1-abstract-full').style.display = 'none'; document.getElementById('2207.10669v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022.
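<p class="is-size-7">For context on the baselines mentioned above, conventional L2-regularized qDPC recovers phase with a one-step Tikhonov deconvolution of the DPC measurements against precomputed phase transfer functions. The sketch below outlines only that conventional inversion, under the assumption that the transfer functions are already available; it is not the Retinex-qDPC model, and the function name and argument layout are illustrative.</p> <pre><code>
import numpy as np
from numpy.fft import fft2, ifft2

def tikhonov_qdpc(dpc_images, transfer_functions, alpha=1e-3):
    """One-step L2 (Tikhonov) qDPC phase retrieval.

    dpc_images: background-corrected DPC images (list of 2-D arrays)
    transfer_functions: matching phase transfer functions in the Fourier domain
    alpha: Tikhonov regularization weight
    """
    numerator = np.zeros_like(dpc_images[0], dtype=complex)
    denominator = np.zeros_like(dpc_images[0], dtype=float)
    for image, h in zip(dpc_images, transfer_functions):
        numerator += np.conj(h) * fft2(image)
        denominator += np.abs(h) ** 2
    return np.real(ifft2(numerator / (denominator + alpha)))
</code></pre>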
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.13419">arXiv:2206.13419</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.13419">pdf</a>, <a href="https://arxiv.org/format/2206.13419">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> DeStripe: A Self2Self Spatio-Spectral Graph Neural Network with Unfolded Hessian for Stripe Artifact Removal in Light-sheet Microscopy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Y">Yu Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Weiss%2C+K">Kurt Weiss</a>, <a href="/search/eess?searchtype=author&amp;query=Navab%2C+N">Nassir Navab</a>, <a href="/search/eess?searchtype=author&amp;query=Marr%2C+C">Carsten Marr</a>, <a href="/search/eess?searchtype=author&amp;query=Huisken%2C+J">Jan Huisken</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.13419v1-abstract-short" style="display: inline;"> Light-sheet fluorescence microscopy (LSFM) is a cutting-edge volumetric imaging technique that allows for three-dimensional imaging of mesoscopic samples with decoupled illumination and detection paths. Although the selective excitation scheme of such a microscope provides intrinsic optical sectioning that minimizes out-of-focus fluorescence background and sample photodamage, it is prone to light&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.13419v1-abstract-full').style.display = 'inline'; document.getElementById('2206.13419v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.13419v1-abstract-full" style="display: none;"> Light-sheet fluorescence microscopy (LSFM) is a cutting-edge volumetric imaging technique that allows for three-dimensional imaging of mesoscopic samples with decoupled illumination and detection paths. Although the selective excitation scheme of such a microscope provides intrinsic optical sectioning that minimizes out-of-focus fluorescence background and sample photodamage, it is prone to light absorption and scattering effects, which results in uneven illumination and striping artifacts in the images adversely. To tackle this issue, in this paper, we propose a blind stripe artifact removal algorithm in LSFM, called DeStripe, which combines a self-supervised spatio-spectral graph neural network with unfolded Hessian prior. Specifically, inspired by the desirable properties of Fourier transform in condensing striping information into isolated values in the frequency domain, DeStripe firstly localizes the potentially corrupted Fourier coefficients by exploiting the structural difference between unidirectional stripe artifacts and more isotropic foreground images. 
Affected Fourier coefficients can then be fed into a graph neural network for recovery, with a Hessian regularization unrolled to further ensure structures in the standard image space are well preserved. Since, in reality, stripe-free LSFM data barely exist under a standard image acquisition protocol, DeStripe is equipped with a Self2Self denoising loss term, enabling artifact elimination without access to stripe-free ground truth images. Competitive experimental results demonstrate the efficacy of DeStripe in recovering corrupted biomarkers in LSFM with both synthetic and real stripe artifacts. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.13419v1-abstract-full').style.display = 'none'; document.getElementById('2206.13419v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by 25th International Conference on Medical Image Computing and Computer Assisted Intervention</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.13303">arXiv:2112.13303</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.13303">pdf</a>, <a href="https://arxiv.org/format/2112.13303">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1364/PRJ.456156">10.1364/PRJ.456156 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Imaging through scattering media via spatial-temporal encoded pattern illumination </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+X">Xingchen Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Nie%2C+X">Xiaoyu Nie</a>, <a href="/search/eess?searchtype=author&amp;query=Yi%2C+Z">Zhenhuan Yi</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Scully%2C+M+O">Marlan O. Scully</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.13303v1-abstract-short" style="display: inline;"> Optical imaging through scattering media is a long-standing challenge. Although many approaches have been developed to focus light or image objects through scattering media, they are either invasive, restricted to stationary or slowly-moving media, or require high-resolution cameras and complex algorithms to retrieve the images.
Here we introduce a computational imaging technique that can overcome&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.13303v1-abstract-full').style.display = 'inline'; document.getElementById('2112.13303v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.13303v1-abstract-full" style="display: none;"> Optical imaging through scattering media is a long-standing challenge. Although many approaches have been developed to focus light or image objects through scattering media, they are either invasive, restricted to stationary or slowly-moving media, or require high-resolution cameras and complex algorithms to retrieve the images. Here we introduce a computational imaging technique that can overcome these restrictions by exploiting spatial-temporal encoded patterns (STEP). We present non-invasive imaging through scattering media with a single-pixel photodetector. We show that the method is insensitive to the motions of media. We further demonstrate that our image reconstruction algorithm is much more efficient than correlation-based algorithms for single-pixel imaging, which may allow fast imaging in currently unreachable scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.13303v1-abstract-full').style.display = 'none'; document.getElementById('2112.13303v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.13293">arXiv:2112.13293</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.13293">pdf</a>, <a href="https://arxiv.org/format/2112.13293">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> Deep-learned speckle pattern and its application to ghost imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Nie%2C+X">Xiaoyu Nie</a>, <a href="/search/eess?searchtype=author&amp;query=Song%2C+H">Haotian Song</a>, <a href="/search/eess?searchtype=author&amp;query=Ren%2C+W">Wenhan Ren</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+X">Xingchen Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Z">Zhedong Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Scully%2C+M+O">Marlan O. Scully</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.13293v2-abstract-short" style="display: inline;"> In this paper, we present a method for speckle pattern design using deep learning. 
The speckle patterns possess unique features after experiencing convolutions in Speckle-Net, our well-designed framework for speckle pattern generation. We then apply our method to the computational ghost imaging system. The standard deep learning-assisted ghost imaging methods use the network to recognize the recon&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.13293v2-abstract-full').style.display = 'inline'; document.getElementById('2112.13293v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.13293v2-abstract-full" style="display: none;"> In this paper, we present a method for speckle pattern design using deep learning. The speckle patterns possess unique features after experiencing convolutions in Speckle-Net, our well-designed framework for speckle pattern generation. We then apply our method to the computational ghost imaging system. The standard deep learning-assisted ghost imaging methods use the network to recognize the reconstructed objects or imaging algorithms. In contrast, this innovative application optimizes the illuminating speckle patterns via Speckle-Net with specific sampling ratios. Our method, therefore, outperforms the other techniques for ghost imaging, particularly its ability to retrieve high-quality images with extremely low sampling ratios. It opens a new route towards nontrivial speckle generation by referring to a standard loss function on specified objectives with the modified deep neural network. It also has great potential for applications in the fields of dynamic speckle illumination microscopy, structured illumination microscopy, x-ray imaging, photo-acoustic imaging, and optical lattices. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.13293v2-abstract-full').style.display = 'none'; document.getElementById('2112.13293v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 12 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.03694">arXiv:2112.03694</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.03694">pdf</a>, <a href="https://arxiv.org/format/2112.03694">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TMI.2021.3125459">10.1109/TMI.2021.3125459 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Hard Sample Aware Noise Robust Learning for Histopathology Image Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhu%2C+C">Chuang Zhu</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+W">Wenkai Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Ting Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+Y">Ying Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+M">Mulan Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.03694v1-abstract-short" style="display: inline;"> Deep learning-based histopathology image classification is a key technique to help physicians in improving the accuracy and promptness of cancer diagnosis. However, the noisy labels are often inevitable in the complex manual annotation process, and thus mislead the training of the classification model. In this work, we introduce a novel hard sample aware noise robust learning method for histopatho&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.03694v1-abstract-full').style.display = 'inline'; document.getElementById('2112.03694v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.03694v1-abstract-full" style="display: none;"> Deep learning-based histopathology image classification is a key technique to help physicians in improving the accuracy and promptness of cancer diagnosis. However, the noisy labels are often inevitable in the complex manual annotation process, and thus mislead the training of the classification model. In this work, we introduce a novel hard sample aware noise robust learning method for histopathology image classification. 
To distinguish the informative hard samples from the harmful noisy ones, we build an easy/hard/noisy (EHN) detection model by using the sample training history. Then we integrate the EHN into a self-training architecture to lower the noise rate through gradual label correction. With the obtained almost clean dataset, we further propose a noise suppressing and hard enhancing (NSHE) scheme to train the noise robust model. Compared with the previous works, our method can save more clean samples and can be directly applied to the real-world noisy dataset scenario without using a clean subset. Experimental results demonstrate that the proposed scheme outperforms the current state-of-the-art methods in both the synthetic and real-world noisy datasets. The source code and data are available at https://github.com/bupt-ai-cz/HSA-NRL/. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.03694v1-abstract-full').style.display = 'none'; document.getElementById('2112.03694v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 20 figures, IEEE Transactions on Medical Imaging</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.0 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.12138">arXiv:2111.12138</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2111.12138">pdf</a>, <a href="https://arxiv.org/format/2111.12138">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> Multi-Modality Microscopy Image Style Transfer for Nuclei Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Y">Ye Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Wagner%2C+S+J">Sophia J. Wagner</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.12138v1-abstract-short" style="display: inline;"> Annotating microscopy images for nuclei segmentation is laborious and time-consuming. To leverage the few existing annotations, also across multiple modalities, we propose a novel microscopy-style augmentation technique based on a generative adversarial network (GAN).
Unlike other style transfer methods, it can not only deal with different cell assay types and lighting conditions, but also with di&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.12138v1-abstract-full').style.display = 'inline'; document.getElementById('2111.12138v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.12138v1-abstract-full" style="display: none;"> Annotating microscopy images for nuclei segmentation is laborious and time-consuming. To leverage the few existing annotations, also across multiple modalities, we propose a novel microscopy-style augmentation technique based on a generative adversarial network (GAN). Unlike other style transfer methods, it can not only deal with different cell assay types and lighting conditions, but also with different imaging modalities, such as bright-field and fluorescence microscopy. Using disentangled representations for content and style, we can preserve the structure of the original image while altering its style during augmentation. We evaluate our data augmentation on the 2018 Data Science Bowl dataset consisting of various cell assays, lighting conditions, and imaging modalities. With our style augmentation, the segmentation accuracy of the two top-ranked Mask R-CNN-based nuclei segmentation algorithms in the competition increases significantly. Thus, our augmentation technique renders the downstream task more robust to the test data heterogeneity and helps counteract class imbalance without resampling of minority classes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.12138v1-abstract-full').style.display = 'none'; document.getElementById('2111.12138v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work has been submitted to the IEEE for possible publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.08185">arXiv:2111.08185</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2111.08185">pdf</a>, <a href="https://arxiv.org/format/2111.08185">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Graph neural network-based fault diagnosis: a review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Z">Zhiwen Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+J">Jiamin Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Alippi%2C+C">Cesare Alippi</a>, <a href="/search/eess?searchtype=author&amp;query=Ding%2C+S+X">Steven X. 
Ding</a>, <a href="/search/eess?searchtype=author&amp;query=Shardt%2C+Y">Yuri Shardt</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+C">Chunhua Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.08185v1-abstract-short" style="display: inline;"> Graph neural network (GNN)-based fault diagnosis (FD) has received increasing attention in recent years, due to the fact that data coming from several application domains can be advantageously represented as graphs. Indeed, this particular representation form has led to superior performance compared to traditional FD approaches. In this review, an easy introduction to GNN, potential applications t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.08185v1-abstract-full').style.display = 'inline'; document.getElementById('2111.08185v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.08185v1-abstract-full" style="display: none;"> Graph neural network (GNN)-based fault diagnosis (FD) has received increasing attention in recent years, due to the fact that data coming from several application domains can be advantageously represented as graphs. Indeed, this particular representation form has led to superior performance compared to traditional FD approaches. In this review, an easy introduction to GNN, potential applications to the field of fault diagnosis, and future perspectives are given. First, the paper reviews neural network-based FD methods by focusing on their data representations, namely, time-series, images, and graphs. Second, basic principles and principal architectures of GNN are introduced, with attention to graph convolutional networks, graph attention networks, graph sample and aggregate, graph auto-encoder, and spatial-temporal graph convolutional networks. Third, the most relevant fault diagnosis methods based on GNN are validated through the detailed experiments, and conclusions are made that the GNN-based methods can achieve good fault diagnosis performance. Finally, discussions and future challenges are provided. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.08185v1-abstract-full').style.display = 'none'; document.getElementById('2111.08185v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 18 figures, 10 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.10435">arXiv:2110.10435</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.10435">pdf</a>, <a href="https://arxiv.org/format/2110.10435">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> RSS-based Multiple Sources Localization with Unknown Log-normal Shadow Fading </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Chu%2C+Y">Yueyan Chu</a>, <a href="/search/eess?searchtype=author&amp;query=Guo%2C+W">Wenbin Guo</a>, <a href="/search/eess?searchtype=author&amp;query=You%2C+K">Kangyong You</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+L">Lei Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+W">Wenbo Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.10435v1-abstract-short" style="display: inline;"> Multi-source localization based on received signal strength (RSS) has drawn great interest in wireless sensor networks. However, the shadow fading term caused by obstacles cannot be separated from the received signal, which leads to severe error in location estimate. In this paper, we approximate the log-normal sum distribution through Fenton-Wilkinson method to formulate a non-convex maximum like&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10435v1-abstract-full').style.display = 'inline'; document.getElementById('2110.10435v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.10435v1-abstract-full" style="display: none;"> Multi-source localization based on received signal strength (RSS) has drawn great interest in wireless sensor networks. However, the shadow fading term caused by obstacles cannot be separated from the received signal, which leads to severe error in location estimate. In this paper, we approximate the log-normal sum distribution through Fenton-Wilkinson method to formulate a non-convex maximum likelihood (ML) estimator with unknown shadow fading factor. In order to overcome the difficulty in solving the non-convex problem, we propose a novel algorithm to estimate the locations of sources. Specifically, the region is divided into $N$ grids firstly, and the multi-source localization is converted into a sparse recovery problem so that we can obtain the sparse solution. Then we utilize the K-means clustering method to obtain the rough locations of the off-grid sources as the initial feasible point of the ML estimator. Finally, an iterative refinement of the estimated locations is proposed by dynamic updating of the localization dictionary. The proposed algorithm can efficiently approach a superior local optimal solution of the ML estimator. 
It is shown from the simulation results that the proposed method has a promising localization performance and improves the robustness for multi-source localization in unknown shadow fading environments. Moreover, the proposed method reduces the computational complexity from $O(K^3N^3)$ to $O(N^3)$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10435v1-abstract-full').style.display = 'none'; document.getElementById('2110.10435v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 10 figures. arXiv admin note: substantial text overlap with arXiv:2105.15097</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.07673">arXiv:2108.07673</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2108.07673">pdf</a>, <a href="https://arxiv.org/format/2108.07673">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.optcom.2022.128450">10.1016/j.optcom.2022.128450 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> 0.8% Nyquist computational ghost imaging via non-experimental deep learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Song%2C+H">Haotian Song</a>, <a href="/search/eess?searchtype=author&amp;query=Nie%2C+X">Xiaoyu Nie</a>, <a href="/search/eess?searchtype=author&amp;query=Su%2C+H">Hairong Su</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+H">Hui Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Zhou%2C+Y">Yu Zhou</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+X">Xingchen Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Scully%2C+M+O">Marlan O. Scully</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.07673v1-abstract-short" style="display: inline;"> We present a framework for computational ghost imaging based on deep learning and customized pink noise speckle patterns. The deep neural network in this work, which can learn the sensing model and enhance image reconstruction quality, is trained merely by simulation.
To demonstrate the sub-Nyquist level in our work, the conventional computational ghost imaging results, reconstructed imaging resul&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.07673v1-abstract-full').style.display = 'inline'; document.getElementById('2108.07673v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.07673v1-abstract-full" style="display: none;"> We present a framework for computational ghost imaging based on deep learning and customized pink noise speckle patterns. The deep neural network in this work, which can learn the sensing model and enhance image reconstruction quality, is trained merely by simulation. To demonstrate the sub-Nyquist level in our work, the conventional computational ghost imaging results, reconstructed imaging results using white noise and pink noise via deep learning are compared under multiple sampling rates at different noise conditions. We show that the proposed scheme can provide high-quality images with a sampling rate of 0.8% even when the object is outside the training dataset, and it is robust to noisy environments. This method is excellent for various applications, particularly those that require a low sampling rate, fast reconstruction efficiency, or experience strong noise interference. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.07673v1-abstract-full').style.display = 'none'; document.getElementById('2108.07673v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.12357">arXiv:2107.12357</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.12357">pdf</a>, <a href="https://arxiv.org/format/2107.12357">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Structure-Preserving Multi-Domain Stain Color Augmentation using Style-Transfer with Disentangled Representations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Wagner%2C+S+J">Sophia J. 
Wagner</a>, <a href="/search/eess?searchtype=author&amp;query=Khalili%2C+N">Nadieh Khalili</a>, <a href="/search/eess?searchtype=author&amp;query=Sharma%2C+R">Raghav Sharma</a>, <a href="/search/eess?searchtype=author&amp;query=Boxberg%2C+M">Melanie Boxberg</a>, <a href="/search/eess?searchtype=author&amp;query=Marr%2C+C">Carsten Marr</a>, <a href="/search/eess?searchtype=author&amp;query=de+Back%2C+W">Walter de Back</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.12357v1-abstract-short" style="display: inline;"> In digital pathology, different staining procedures and scanners cause substantial color variations in whole-slide images (WSIs), especially across different laboratories. These color shifts result in a poor generalization of deep learning-based methods from the training domain to external pathology data. To increase test performance, stain normalization techniques are used to reduce the variance&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.12357v1-abstract-full').style.display = 'inline'; document.getElementById('2107.12357v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.12357v1-abstract-full" style="display: none;"> In digital pathology, different staining procedures and scanners cause substantial color variations in whole-slide images (WSIs), especially across different laboratories. These color shifts result in a poor generalization of deep learning-based methods from the training domain to external pathology data. To increase test performance, stain normalization techniques are used to reduce the variance between training and test domain. Alternatively, color augmentation can be applied during training leading to a more robust model without the extra step of color normalization at test time. We propose a novel color augmentation technique, HistAuGAN, that can simulate a wide variety of realistic histology stain colors, thus making neural networks stain-invariant when applied during training. Based on a generative adversarial network (GAN) for image-to-image translation, our model disentangles the content of the image, i.e., the morphological tissue structure, from the stain color attributes. It can be trained on multiple domains and, therefore, learns to cover different stain colors as well as other domain-specific variations introduced in the slide preparation and imaging process. We demonstrate that HistAuGAN outperforms conventional color augmentation techniques on a classification task on the publicly available dataset Camelyon17 and show that it is able to mitigate present batch effects. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.12357v1-abstract-full').style.display = 'none'; document.getElementById('2107.12357v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted at MICCAI 2021, code and model weights are available at http://github.com/sophiajw/HistAuGAN</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.06079">arXiv:2102.06079</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.06079">pdf</a>, <a href="https://arxiv.org/format/2102.06079">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Superresolving second-order correlation imaging using synthesized colored noise speckles </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Li%2C+Z">Zheng Li</a>, <a href="/search/eess?searchtype=author&amp;query=Nie%2C+X">Xiaoyu Nie</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+F">Fan Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+X">Xiangpei Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+D">Dongyu Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Dong%2C+X">Xiaolong Dong</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+X">Xingchen Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Zubairy%2C+M+S">M. Suhail Zubairy</a>, <a href="/search/eess?searchtype=author&amp;query=Scully%2C+M+O">Marlan O. Scully</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.06079v1-abstract-short" style="display: inline;"> We present a novel method to synthesize non-trivial speckles that can enable superresolving second-order correlation imaging. The speckles acquire a unique anti-correlation in the spatial intensity fluctuation by introducing the blue noise spectrum to the input light fields through amplitude modulation. Illuminating objects with the blue noise speckle patterns can lead to a sub-diffraction limit i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.06079v1-abstract-full').style.display = 'inline'; document.getElementById('2102.06079v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.06079v1-abstract-full" style="display: none;"> We present a novel method to synthesize non-trivial speckles that can enable superresolving second-order correlation imaging. The speckles acquire a unique anti-correlation in the spatial intensity fluctuation by introducing the blue noise spectrum to the input light fields through amplitude modulation. Illuminating objects with the blue noise speckle patterns can lead to a sub-diffraction limit imaging system with a resolution more than three times higher than first-order imaging, which is comparable to the resolving power of ninth order correlation imaging with thermal light. 
Our method opens a new route towards non-trivial speckle generation by tailoring amplitudes of the input light fields and provides a versatile scheme for constructing superresolving imaging and microscopy systems without invoking complicated higher-order correlations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.06079v1-abstract-full').style.display = 'none'; document.getElementById('2102.06079v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.07284">arXiv:2012.07284</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.07284">pdf</a>, <a href="https://arxiv.org/format/2012.07284">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Moving Object Captured with Pink Noise Pattern in Computational Ghost Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Nie%2C+X">Xiaoyu Nie</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+X">Xingchen Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Scully%2C+M+O">Marlan O. Scully</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.07284v2-abstract-short" style="display: inline;"> We develop and experimentally demonstrate an imaging method based on the pink noise pattern in the computational ghost imaging (CGI) system, which has a strong ability to photograph moving objects. To examine its unique ability and scope of application, the object oscillates with variable amplitude in horizontal axis, and the result via commonly used white noise are also measured as a comparison.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.07284v2-abstract-full').style.display = 'inline'; document.getElementById('2012.07284v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.07284v2-abstract-full" style="display: none;"> We develop and experimentally demonstrate an imaging method based on the pink noise pattern in the computational ghost imaging (CGI) system, which has a strong ability to photograph moving objects. To examine its unique ability and scope of application, the object oscillates with variable amplitude in horizontal axis, and the result via commonly used white noise are also measured as a comparison. We show that our method can image the object when the white noise method fails. 
In addition, our method uses a smaller number of patterns, and enhances the signal-to-noise ratio (SNR) to a great extent. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.07284v2-abstract-full').style.display = 'none'; document.getElementById('2012.07284v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.07250">arXiv:2012.07250</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.07250">pdf</a>, <a href="https://arxiv.org/format/2012.07250">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1103/PhysRevA.105.043525">10.1103/PhysRevA.105.043525 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Sub-Nyquist computational ghost imaging with orthonormalized colored noise pattern </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Nie%2C+X">Xiaoyu Nie</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+X">Xingchen Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Scully%2C+M+O">Marlan O. Scully</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.07250v3-abstract-short" style="display: inline;"> Computational ghost imaging generally requires a large number of pattern illumination to obtain a high-quality image. The colored noise speckle pattern was recently proposed to substitute the white noise pattern in a variety of noisy environments and gave a significant signal-to-noise ratio enhancement even with a limited number of patterns. We propose and experimentally demonstrate here an orthon&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.07250v3-abstract-full').style.display = 'inline'; document.getElementById('2012.07250v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.07250v3-abstract-full" style="display: none;"> Computational ghost imaging generally requires a large number of pattern illumination to obtain a high-quality image. The colored noise speckle pattern was recently proposed to substitute the white noise pattern in a variety of noisy environments and gave a significant signal-to-noise ratio enhancement even with a limited number of patterns.
We propose and experimentally demonstrate here an orthonormalization approach based on the colored noise patterns to achieve sub-Nyquist computational ghost imaging. We tested the reconstructed image in quality indicators such as the contrast-to-noise ratio, the mean square error, the peak signal to noise ratio, and the correlation coefficient. The results suggest that our method can provide high-quality images while using a sampling ratio an order lower than the conventional methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.07250v3-abstract-full').style.display = 'none'; document.getElementById('2012.07250v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.14390">arXiv:2009.14390</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.14390">pdf</a>, <a href="https://arxiv.org/format/2009.14390">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1103/PhysRevA.104.013513">10.1103/PhysRevA.104.013513 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Anti-interference Computational Ghost Imaging with Pink Noise Speckle Patterns </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Nie%2C+X">Xiaoyu Nie</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+F">Fan Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+X">Xiangpei Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+X">Xingchen Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Nessler%2C+R">Reed Nessler</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Zubairy%2C+M+S">M. Suhail Zubairy</a>, <a href="/search/eess?searchtype=author&amp;query=Scully%2C+M+O">Marlan O. Scully</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.14390v3-abstract-short" style="display: inline;"> We propose a computational ghost imaging scheme using customized pink noise speckle pattern illumination. By modulating the spatial frequency amplitude of the speckles, we generate speckle patterns with a significant positive spatial correlation. 
We experimentally reconstruct images using our synthesized speckle patterns in the presence of a variety of noise sources and pattern distortion and show&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.14390v3-abstract-full').style.display = 'inline'; document.getElementById('2009.14390v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.14390v3-abstract-full" style="display: none;"> We propose a computational ghost imaging scheme using customized pink noise speckle pattern illumination. By modulating the spatial frequency amplitude of the speckles, we generate speckle patterns with a significant positive spatial correlation. We experimentally reconstruct images using our synthesized speckle patterns in the presence of a variety of noise sources and pattern distortion and show that it is robust to noise interference. The results are compared with the use of standard white noise speckle patterns. We show that our method gives good image qualities under different noise interference situations while the traditional way fails. The proposed scheme promises potential applications in underwater, dynamic, and moving target computational ghost imaging. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.14390v3-abstract-full').style.display = 'none'; document.getElementById('2009.14390v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 6 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Phys. Rev.
A 104, 013513 (2021) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2007.11641">arXiv:2007.11641</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2007.11641">pdf</a>, <a href="https://arxiv.org/format/2007.11641">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Attention based Multiple Instance Learning for Classification of Blood Cell Disorders </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Sadafi%2C+A">Ario Sadafi</a>, <a href="/search/eess?searchtype=author&amp;query=Makhro%2C+A">Asya Makhro</a>, <a href="/search/eess?searchtype=author&amp;query=Bogdanova%2C+A">Anna Bogdanova</a>, <a href="/search/eess?searchtype=author&amp;query=Navab%2C+N">Nassir Navab</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingying Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Albarqouni%2C+S">Shadi Albarqouni</a>, <a href="/search/eess?searchtype=author&amp;query=Marr%2C+C">Carsten Marr</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2007.11641v1-abstract-short" style="display: inline;"> Red blood cells are highly deformable and present in various shapes. In blood cell disorders, only a subset of all cells is morphologically altered and relevant for the diagnosis. However, manually labeling of all cells is laborious, complicated and introduces inter-expert variability. We propose an attention based multiple instance learning method to classify blood samples of patients suffering f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.11641v1-abstract-full').style.display = 'inline'; document.getElementById('2007.11641v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2007.11641v1-abstract-full" style="display: none;"> Red blood cells are highly deformable and present in various shapes. In blood cell disorders, only a subset of all cells is morphologically altered and relevant for the diagnosis. However, manually labeling of all cells is laborious, complicated and introduces inter-expert variability. We propose an attention based multiple instance learning method to classify blood samples of patients suffering from blood cell disorders. Cells are detected using an R-CNN architecture. With the features extracted for each cell, a multiple instance learning method classifies patient samples into one out of four blood cell disorders. The attention mechanism provides a measure of the contribution of each cell to the overall classification and significantly improves the network&#39;s classification accuracy as well as its interpretability for the medical expert. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.11641v1-abstract-full').style.display = 'none'; document.getElementById('2007.11641v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.15954">arXiv:2006.15954</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.15954">pdf</a>, <a href="https://arxiv.org/format/2006.15954">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Multi-level colonoscopy malignant tissue detection with adversarial CAC-UNet </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhu%2C+C">Chuang Zhu</a>, <a href="/search/eess?searchtype=author&amp;query=Mei%2C+K">Ke Mei</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Ting Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Luo%2C+Y">Yihao Luo</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+J">Jun Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+Y">Ying Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+M">Mulan Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.15954v2-abstract-short" style="display: inline;"> The automatic and objective medical diagnostic model can be valuable to achieve early cancer detection, and thus reducing the mortality rate. In this paper, we propose a highly efficient multi-level malignant tissue detection through the designed adversarial CAC-UNet. A patch-level model with a pre-prediction strategy and a malignancy area guided label smoothing is adopted to remove the negative W&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.15954v2-abstract-full').style.display = 'inline'; document.getElementById('2006.15954v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.15954v2-abstract-full" style="display: none;"> The automatic and objective medical diagnostic model can be valuable to achieve early cancer detection, and thus reducing the mortality rate. In this paper, we propose a highly efficient multi-level malignant tissue detection through the designed adversarial CAC-UNet. A patch-level model with a pre-prediction strategy and a malignancy area guided label smoothing is adopted to remove the negative WSIs, with which to lower the risk of false positive detection. For the selected key patches by multi-model ensemble, an adversarial context-aware and appearance consistency UNet (CAC-UNet) is designed to achieve robust segmentation. In CAC-UNet, mirror designed discriminators are able to seamlessly fuse the whole feature maps of the skillfully designed powerful backbone network without any information loss. 
Besides, a mask prior is further added to guide the accurate segmentation mask prediction through an extra mask-domain discriminator. The proposed scheme achieves the best results in MICCAI DigestPath2019 challenge on colonoscopy tissue segmentation and classification task. The full implementation details and the trained models are available at https://github.com/Raykoooo/CAC-UNet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.15954v2-abstract-full').style.display = 'none'; document.getElementById('2006.15954v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted by Neurocomputing; winner of the MICCAI DigestPath 2019 challenge on colonoscopy tissue segmentation and classification task</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.08021">arXiv:1911.08021</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1911.08021">pdf</a>, <a href="https://arxiv.org/format/1911.08021">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TSP.2020.3009875">10.1109/TSP.2020.3009875 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Parametric Sparse Bayesian Dictionary Learning for Multiple Sources Localization with Propagation Parameters Uncertainty and Nonuniform Noise </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=You%2C+K">Kangyong You</a>, <a href="/search/eess?searchtype=author&amp;query=Guo%2C+W">Wenbin Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tao Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Y">Yueliang Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Zuo%2C+P">Peiliang Zuo</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+W">Wenbo Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.08021v2-abstract-short" style="display: inline;"> Received signal strength (RSS) based source localization method is popular due to its simplicity and low cost. However, this method is highly dependent on the propagation model which is not easy to be captured in practice. 
arXiv:1911.08021  [pdf, other]  eess.SP  doi: 10.1109/TSP.2020.3009875
Parametric Sparse Bayesian Dictionary Learning for Multiple Sources Localization with Propagation Parameters Uncertainty and Nonuniform Noise
Authors: Kangyong You, Wenbin Guo, Tao Peng, Yueliang Liu, Peiliang Zuo, Wenbo Wang
Abstract: Received signal strength (RSS) based source localization is popular due to its simplicity and low cost. However, the method depends heavily on a propagation model that is not easy to capture in practice. Moreover, most existing works only consider the single-source, identical-measurement-noise scenario, while in practice multiple co-channel sources may transmit simultaneously and the measurement noise tends to be nonuniform. In this paper, we study the multiple co-channel sources localization (MSL) problem under unknown nonuniform noise while jointly estimating the parametric propagation model. Specifically, we model the MSL problem as being parameterized by the unknown source locations and propagation parameters, and then reformulate it as a joint parametric sparsifying dictionary learning (PSDL) and sparse signal recovery (SSR) problem, which is solved under the framework of sparse Bayesian learning with iterative parametric dictionary approximation. Furthermore, multiple snapshot measurements are utilized to improve the localization accuracy, and the Cramer-Rao lower bound (CRLB) is derived to analyze the theoretical estimation error bound. Compared with state-of-the-art sparsity-based MSL algorithms as well as the CRLB, extensive simulations show the importance of jointly inferring the propagation parameters and highlight the effectiveness and superiority of the proposed method.
Submitted 22 December, 2019; v1 submitted 18 November, 2019; originally announced November 2019.
Comments: 12 pages, 9 figures
arXiv:1911.08018  [pdf, other]  cs.SI  eess.SP  doi: 10.1109/TSIPN.2020.3038475
Graph Learning for Spatiotemporal Signals with Long- and Short-Term Characterization
Authors: Yueliang Liu, Wenbin Guo, Kangyong You, Lei Zhao, Tao Peng, Wenbo Wang
Abstract: Mining natural associations from high-dimensional spatiotemporal signals plays an important role in various fields, including biology, climatology, and financial analysis. However, most existing works have mainly studied time-independent signals and do not exploit the correlations of spatiotemporal signals that are needed to achieve high learning accuracy. This paper aims to learn graphs that better reflect underlying data relations by leveraging the long- and short-term characteristics of spatiotemporal signals. First, a spatiotemporal signal model is presented that considers both spatial and temporal relations. In particular, we integrate a low-rank representation and a Gaussian Markov process to describe the temporal correlations. Then, the graph learning problem is formulated as joint low-rank component estimation and graph Laplacian inference. Accordingly, we propose a low-rank and spatiotemporal smoothness-based graph learning method (GL-LRSS), which introduces a spatiotemporal smoothness prior into time-vertex signal analysis. By jointly exploiting the low rank of long-time observations and the smoothness of short-time observations, the overall learning performance can be effectively improved. Experiments on both synthetic and real-world datasets demonstrate substantial improvements in the learning accuracy of the proposed method over state-of-the-art low-rank component estimation and graph learning methods.
Submitted 6 December, 2020; v1 submitted 18 November, 2019; originally announced November 2019.
Comments: 13 pages, 6 figures
Journal ref: IEEE Transactions on Signal and Information Processing over Networks, vol. 6, pp. 699-713, 2020
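As a rough illustration of the two ingredients named in the abstract (a low-rank long-term component and smoothness-driven graph inference), here is a toy sketch. It is not GL-LRSS: the truncated SVD and the Gaussian-kernel weighting stand in for the paper's joint low-rank estimation and Laplacian optimization, and all sizes and parameters are made up.

```python
# Toy separation of a low-rank trend followed by smoothness-based graph construction.
import numpy as np

def learn_graph(X, rank=2, theta=1.0):
    """X: (num_nodes, num_times) spatiotemporal observations."""
    # long-term, low-rank part (slowly varying trends shared across nodes)
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    low_rank = (U[:, :rank] * s[:rank]) @ Vt[:rank]
    R = X - low_rank                               # short-term fluctuations
    # smoothness-based weights: nodes with similar short-term behaviour get strong edges
    sq_dist = np.sum((R[:, None, :] - R[None, :, :]) ** 2, axis=2)
    W = np.exp(-sq_dist / (2 * theta ** 2))
    np.fill_diagonal(W, 0.0)
    L = np.diag(W.sum(axis=1)) - W                 # combinatorial graph Laplacian
    return W, L, low_rank

# toy usage: two clusters of nodes with correlated short-term signals
rng = np.random.default_rng(1)
base = rng.standard_normal((2, 200))
X = np.vstack([base[0] + 0.1 * rng.standard_normal((3, 200)),
               base[1] + 0.1 * rng.standard_normal((3, 200))])
W, L, _ = learn_graph(X, rank=1, theta=2.0)
print(np.round(W, 2))                              # strong intra-cluster weights
```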
has-text-grey-dark mathjax" id="1907.08778v1-abstract-short" style="display: inline;"> Optical imaging through scattering media is a commonly confronted with the problem of reconstruction of complex objects and optical memory effect. To solve the problem, here, we propose a novel configuration based on the combination of ptychography and shower-curtain effect, which enables the retrieval of non-sparse samples through scattering media beyond the memory effect. Furthermore, by virtue&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08778v1-abstract-full').style.display = 'inline'; document.getElementById('1907.08778v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.08778v1-abstract-full" style="display: none;"> Optical imaging through scattering media is a commonly confronted with the problem of reconstruction of complex objects and optical memory effect. To solve the problem, here, we propose a novel configuration based on the combination of ptychography and shower-curtain effect, which enables the retrieval of non-sparse samples through scattering media beyond the memory effect. Furthermore, by virtue of the shower-curtain effect, the proposed imaging system is insensitive to dynamic scattering media. Results from the retrieval of hair follicle section demonstrate the effectiveness and feasibility of the proposed method. The field of view is improved to 2.64mm. This present technique will be a potential approach for imaging through deep biological tissue. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08778v1-abstract-full').style.display = 'none'; document.getElementById('1907.08778v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.05644">arXiv:1904.05644</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1904.05644">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Retinal Vessels Segmentation Based on Dilated Multi-Scale Convolutional Neural Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Jiang%2C+Y">Yun Jiang</a>, <a href="/search/eess?searchtype=author&amp;query=Tan%2C+N">Ning Tan</a>, <a href="/search/eess?searchtype=author&amp;query=Peng%2C+T">Tingting Peng</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+H">Hai Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.05644v1-abstract-short" style="display: inline;"> Accurate segmentation of retinal vessels is a basic step in Diabetic retinopathy(DR) detection. Most methods based on deep convolutional neural network (DCNN) have small receptive fields, and hence they are unable to capture global context information of larger regions, with difficult to identify lesions. The final segmented retina vessels contain more noise with low classification accuracy. There&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.05644v1-abstract-full').style.display = 'inline'; document.getElementById('1904.05644v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.05644v1-abstract-full" style="display: none;"> Accurate segmentation of retinal vessels is a basic step in Diabetic retinopathy(DR) detection. Most methods based on deep convolutional neural network (DCNN) have small receptive fields, and hence they are unable to capture global context information of larger regions, with difficult to identify lesions. The final segmented retina vessels contain more noise with low classification accuracy. Therefore, in this paper, we propose a DCNN structure named as D-Net. In the proposed D-Net, the dilation convolution is used in the backbone network to obtain a larger receptive field without losing spatial resolution, so as to reduce the loss of feature information and to reduce the difficulty of tiny thin vessels segmentation. The large receptive field can better distinguished between the lesion area and the blood vessel area. In the proposed Multi-Scale Information Fusion module (MSIF), parallel convolution layers with different dilation rates are used, so that the model can obtain more dense feature information and better capture retinal vessel information of different sizes. In the decoding module, the skip layer connection is used to propagate context information to higher resolution layers, so as to prevent low-level information from passing the entire network structure. Finally, our method was verified on DRIVE, STARE and CHASE dataset. 
arXiv:1904.05644  [pdf]  cs.CV  eess.IV
Retinal Vessels Segmentation Based on Dilated Multi-Scale Convolutional Neural Network
Authors: Yun Jiang, Ning Tan, Tingting Peng, Hai Zhang
Abstract: Accurate segmentation of retinal vessels is a basic step in diabetic retinopathy (DR) detection. Most methods based on deep convolutional neural networks (DCNNs) have small receptive fields and hence cannot capture the global context of larger regions, which makes lesions difficult to identify; the final segmented retinal vessels then contain more noise and have low classification accuracy. Therefore, in this paper we propose a DCNN structure named D-Net. In the proposed D-Net, dilated convolution is used in the backbone network to obtain a larger receptive field without losing spatial resolution, so as to reduce the loss of feature information and the difficulty of segmenting tiny, thin vessels. The larger receptive field also better distinguishes lesion areas from blood vessel areas. In the proposed Multi-Scale Information Fusion module (MSIF), parallel convolution layers with different dilation rates are used, so that the model obtains denser feature information and better captures retinal vessels of different sizes. In the decoding module, skip connections are used to propagate context information to higher-resolution layers, so that low-level information does not have to pass through the entire network structure. Finally, our method was verified on the DRIVE, STARE, and CHASE datasets. The experimental results show that our network structure outperforms state-of-the-art methods such as N4-fields, U-Net, and DRIU in terms of accuracy, sensitivity, specificity, and AUC-ROC. In particular, D-Net outperforms U-Net by 1.04%, 1.23%, and 2.79% on the DRIVE, STARE, and CHASE datasets, respectively.
Submitted 11 April, 2019; originally announced April 2019.
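The key mechanism here, parallel dilated convolutions fused across scales, is easy to show in code. The following is a minimal sketch in the spirit of the MSIF module; the channel counts, number of branches, and dilation rates (1, 2, 4, 8) are assumptions, not the D-Net configuration reported by the authors.

```python
# Minimal multi-scale dilated-convolution block (not the authors' exact MSIF).
import torch
import torch.nn as nn

class MultiScaleDilatedBlock(nn.Module):
    """Parallel 3x3 convolutions with different dilation rates, fused by concatenation."""
    def __init__(self, in_ch=32, branch_ch=16, rates=(1, 2, 4, 8)):
        super().__init__()
        self.branches = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(in_ch, branch_ch, kernel_size=3, padding=r, dilation=r),
                nn.BatchNorm2d(branch_ch),
                nn.ReLU(inplace=True),
            )
            for r in rates
        ])
        # a 1x1 convolution fuses the multi-scale features back to the input width
        self.fuse = nn.Conv2d(branch_ch * len(rates), in_ch, kernel_size=1)

    def forward(self, x):
        # padding == dilation keeps every branch at the input spatial resolution
        feats = [branch(x) for branch in self.branches]
        return self.fuse(torch.cat(feats, dim=1))

# usage on a dummy feature map: spatial size is preserved
x = torch.randn(1, 32, 64, 64)
print(MultiScaleDilatedBlock()(x).shape)   # torch.Size([1, 32, 64, 64])
```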
arXiv:1805.05786  [pdf, ps, other]  eess.SP
An Adaptive Optimal Mapping Selection Algorithm for PNC using Variable QAM Modulation
Authors: Tong Peng, Yi Wang, Alister G. Burr, Mohammad Shikh-Bahaei
Abstract: Fifth generation (5G) wireless networks will need to serve much higher user densities than existing 4G networks, and will therefore require an enhanced radio access network (RAN) infrastructure. Physical layer network coding (PNC) has been shown to enable such high densities with much lower backhaul load than approaches such as Cloud-RAN and coordinated multipoint (CoMP). In this letter, we present an engineering-applicable PNC scheme which allows different cooperating users to use different modulation schemes according to the relative strength of their channels to a given access point. This is in contrast with compute-and-forward and previous PNC schemes, which are designed for the two-way relay channel. A two-stage search algorithm to identify the optimum PNC mappings for given channel state information and modulation is also proposed. Numerical results show that the proposed scheme achieves a low bit error rate with reduced backhaul load.
Submitted 15 May, 2018; originally announced May 2018.
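To give a feel for channel-adaptive PNC mapping selection, here is a highly simplified sketch for two BPSK users and one access point. The candidate set (all non-constant binary functions of the two bits) and the minimum inter-cluster distance criterion are illustrative stand-ins for the letter's two-stage search over variable QAM mappings, not the authors' algorithm.

```python
# Toy channel-adaptive selection of a binary PNC mapping.
import itertools
import numpy as np

BITS = list(itertools.product([0, 1], repeat=2))      # all (b1, b2) pairs

def received_points(h1, h2):
    """Noise-free superimposed constellation at the AP for two BPSK users."""
    return {(b1, b2): h1 * (1 - 2 * b1) + h2 * (1 - 2 * b2) for b1, b2 in BITS}

def min_intercluster_distance(points, table):
    """Smallest distance between received points that map to different PNC symbols."""
    d = np.inf
    for a, b in itertools.combinations(BITS, 2):
        if table[a] != table[b]:
            d = min(d, abs(points[a] - points[b]))
    return d

def select_mapping(h1, h2):
    """Pick the binary mapping f(b1, b2) that maximizes the minimum inter-cluster
    distance for this channel realization, so that nearly coincident superimposed
    points (singular fade states) end up in the same network-coded cluster."""
    points = received_points(h1, h2)
    best, best_d = None, -1.0
    for outputs in itertools.product([0, 1], repeat=4):
        if len(set(outputs)) < 2:                     # a constant map carries no information
            continue
        table = dict(zip(BITS, outputs))
        d = min_intercluster_distance(points, table)
        if d > best_d:
            best, best_d = table, d
    return best, best_d

# usage: near h1 = h2 the points for (0,1) and (1,0) almost coincide, so the selected
# map keeps them in the same cluster and the minimum distance stays large
print(select_mapping(1.0 + 0j, 1.0 + 0.05j))
```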
arXiv:1805.00436  [pdf, ps, other]  eess.SP
A Physical Layer Network Coding Design for 5G Network MIMO
Authors: Tong Peng, Yi Wang, Alister G. Burr, Mohammad Shikh-Bahaei
Abstract: This paper presents a physical layer network coding (PNC) approach for network MIMO (N-MIMO) systems that relieves the heavy backhaul load. The proposed PNC approach is applied to the uplink scenario in binary systems, and the design guideline serves multiple mobile terminals (MTs) while guaranteeing unambiguous recovery of the message from each MT. We first present a novel PNC design criterion based on binary matrix theory, followed by an adaptive optimal mapping selection algorithm based on the proposed criterion. In order to reduce the real-time computational complexity, a two-stage search algorithm for the optimal binary PNC mapping matrix is developed. Numerical results show that the proposed scheme achieves lower outage probability with reduced backhaul load compared to practical CoMP schemes, which quantize the estimated symbols from a log-likelihood ratio (LLR) based multiuser detector into binary bits at each access point (AP).
Submitted 1 May, 2018; originally announced May 2018.
Comments: arXiv admin note: text overlap with arXiv:1801.07061

arXiv:1801.07061  [pdf, ps, other]  eess.SP
Wireless Network Coding in Network MIMO: A New Design for 5G and Beyond
Authors: Tong Peng, Yi Wang, Alister G. Burr, Mohammad Shikh-Bahaei
Abstract: Physical layer network coding (PNC) has been studied to serve wireless network MIMO systems with much lower backhaul load than approaches such as Cloud Radio Access Network (Cloud-RAN) and coordinated multipoint (CoMP). In this paper, we present a design guideline for engineering-applicable PNC that meets the demand for high user densities in 5G wireless RAN infrastructure. Unlike compute-and-forward and PNC design criteria for two-way relay channels, the proposed guideline is designed for the uplink of network MIMO (N-MIMO) systems. We show that the proposed design criteria guarantee that 1) the whole system operates over a binary field; 2) the PNC functions utilised at each access point overcome all singular fade states; and 3) the destination can unambiguously recover all source messages while the overall backhaul load remains at the lowest level. We then develop a two-stage search algorithm to identify the optimum PNC mapping functions, which greatly reduces the real-time computational complexity. The impact of estimated channel information and of the reduced number of singular fade states in different QAM modulation schemes is also studied. In addition, a sub-optimal search method based on a lookup table is presented, which further reduces computational complexity with limited performance loss. Numerical results show that the proposed schemes achieve low outage probability with reduced backhaul load.
Submitted 21 May, 2018; v1 submitted 22 January, 2018; originally announced January 2018.
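The lookup-table idea mentioned in this last abstract can be illustrated separately from the mapping search itself: precompute, offline, the best mapping index for a quantized grid of channel ratios, then do a cheap table lookup at run time. The sketch below uses BPSK, three fixed candidate maps, and a coarse 0.1-spaced grid; all of these are assumptions, and the paper's version covers QAM constellations and binary mapping matrices.

```python
# Toy lookup-table mechanism for sub-optimal, low-complexity PNC mapping selection.
import numpy as np

# three simple candidate binary maps of (b1, b2); the table stores an index into this list
MAPS = [lambda b1, b2: b1 ^ b2, lambda b1, b2: b1, lambda b1, b2: b2]

def best_map_index(ratio):
    """Exhaustive scoring for one channel ratio h2/h1 (run offline, so cost is irrelevant)."""
    pts = {(b1, b2): (1 - 2 * b1) + ratio * (1 - 2 * b2) for b1 in (0, 1) for b2 in (0, 1)}
    def score(f):
        ds = [abs(pts[a] - pts[b]) for a in pts for b in pts
              if a < b and f(*a) != f(*b)]
        return min(ds) if ds else 0.0
    return int(np.argmax([score(f) for f in MAPS]))

# offline: build the table over a quantized grid of complex channel ratios
grid = np.linspace(-2, 2, 41)
table = {(round(x, 1), round(y, 1)): best_map_index(complex(x, y)) for x in grid for y in grid}

def lookup(h1, h2):
    """Run time: quantize the ratio and read the precomputed mapping index."""
    r = h2 / h1
    key = (round(np.clip(r.real, -2, 2), 1), round(np.clip(r.imag, -2, 2), 1))
    return table[key]

print(lookup(1.0 + 0j, 0.98 + 0.02j))   # near the fade state h1 = h2
```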
<li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>
