Search | arXiv e-print repository
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–50 of 52 results for author: <span class="mathjax">Bai, B</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/physics" aria-role="search"> Searching in archive <strong>physics</strong>. <a href="/search/?searchtype=author&query=Bai%2C+B">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Bai, B"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Bai%2C+B&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Bai, B"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Bai%2C+B&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Bai%2C+B&start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Bai%2C+B&start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04210">arXiv:2411.04210</a> <span> [<a href="https://arxiv.org/pdf/2411.04210">pdf</a>, <a href="https://arxiv.org/format/2411.04210">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Accelerator Physics">physics.acc-ph</span> </div> </div> <p class="title is-5 mathjax"> Monochromatization interaction region optics design for direct s-channel Higgs production at FCC-ee </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Zhang%2C+Z">Z. Zhang</a>, <a href="/search/physics?searchtype=author&query=Faus-Golfe%2C+A">A. Faus-Golfe</a>, <a href="/search/physics?searchtype=author&query=Korsun%2C+A">A. Korsun</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">B. Bai</a>, <a href="/search/physics?searchtype=author&query=Jiang%2C+H">H. Jiang</a>, <a href="/search/physics?searchtype=author&query=Oide%2C+K">K. Oide</a>, <a href="/search/physics?searchtype=author&query=Raimondi%2C+P">P. Raimondi</a>, <a href="/search/physics?searchtype=author&query=d%27Enterria%2C+D">D. d'Enterria</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+S">S. Zhang</a>, <a href="/search/physics?searchtype=author&query=Zhou%2C+Z">Z. Zhou</a>, <a href="/search/physics?searchtype=author&query=Chi%2C+Y">Y. Chi</a>, <a href="/search/physics?searchtype=author&query=Zimmermann%2C+F">F. Zimmermann</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04210v1-abstract-short" style="display: inline;"> The FCC-ee offers the potential to measure the electron Yukawa coupling via direct s-channel Higgs production, e+ e- -> H, at a centre-of-mass (CM) energy of 125 GeV. This measurement is significantly facilitated if the CM energy spread of e+ e- collisions can be reduced to a level comparable to the natural width of the Higgs boson, 螕_H = 4.1 MeV, without substantial loss in luminosity. 
Achieving… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04210v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04210v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04210v1-abstract-full" style="display: none;"> The FCC-ee offers the potential to measure the electron Yukawa coupling via direct s-channel Higgs production, e+ e- -> H, at a centre-of-mass (CM) energy of 125 GeV. This measurement is significantly facilitated if the CM energy spread of e+ e- collisions can be reduced to a level comparable to the natural width of the Higgs boson, 螕_H = 4.1 MeV, without substantial loss in luminosity. Achieving this reduction in collision-energy spread is possible through the "monochromatization" concept. The basic idea is to create opposite correlations between spatial position and energy deviation within the colliding beams, which can be accomplished in beam optics by introducing a nonzero dispersion function with opposite signs for the two beams at the interaction point. Since the first proposal in 2016, the implementation of monochromatization at the FCC-ee has been continuously improved, starting from preliminary parametric studies. In this paper, we present a detailed study of the interaction region optics design for this newly proposed collision mode, exploring different potential configurations and their implementation in the FCC-ee global lattice, along with beam dynamics simulations and performance evaluations including the impact of "beamstrahlung." <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04210v1-abstract-full').style.display = 'none'; document.getElementById('2411.04210v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
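For orientation, the dispersion-based monochromatization described in this abstract can be put into rough numbers with the textbook scaling for the collision-energy spread. The sketch below is purely illustrative; the beam energy spread, emittance, beta function and IP dispersion values are assumptions, not parameters from the paper.

```python
import math

# Illustrative FCC-ee-like numbers (assumed, not from the paper)
E_beam = 62.5e9      # beam energy [eV]; two such beams give a 125 GeV CM energy
sigma_delta = 5e-4   # assumed relative beam energy spread
eps_x = 0.5e-9       # assumed horizontal emittance [m rad]
beta_x = 0.09        # assumed horizontal beta function at the IP [m]
D_x = 0.10           # assumed IP dispersion, opposite in sign for e+ and e- [m]

# Betatron beam size at the IP and the monochromatization factor
sigma_x_beta = math.sqrt(eps_x * beta_x)
lam = math.sqrt(1.0 + (D_x * sigma_delta) ** 2 / sigma_x_beta ** 2)

# CM energy spread for head-on collisions, without and with monochromatization
sigma_W = math.sqrt(2.0) * E_beam * sigma_delta
sigma_W_mono = sigma_W / lam

print(f"monochromatization factor lambda = {lam:.1f}")
print(f"CM energy spread: {sigma_W / 1e6:.0f} MeV -> {sigma_W_mono / 1e6:.1f} MeV")
```

With these assumed numbers the spread drops from tens of MeV to a few MeV, i.e. to the order of Γ_H = 4.1 MeV quoted in the abstract, which is the regime the optics design targets.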
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02834">arXiv:2411.02834</a> <span> [<a href="https://arxiv.org/pdf/2411.02834">pdf</a>, <a href="https://arxiv.org/format/2411.02834">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> </div> </div> <p class="title is-5 mathjax"> Utilizing a machine-learned potential to explore enhanced radiation tolerance in the MoNbTaVW high-entropy alloy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Liu%2C+J">Jiahui Liu</a>, <a href="/search/physics?searchtype=author&query=Byggmastar%2C+J">Jesper Byggmastar</a>, <a href="/search/physics?searchtype=author&query=Fan%2C+Z">Zheyong Fan</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bing Bai</a>, <a href="/search/physics?searchtype=author&query=Qian%2C+P">Ping Qian</a>, <a href="/search/physics?searchtype=author&query=Su%2C+Y">Yanjing Su</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02834v2-abstract-short" style="display: inline;"> High-entropy alloys (HEAs) based on tungsten (W) have emerged as promising candidates for plasma-facing components in future fusion reactors, owing to their excellent irradiation resistance. In this study, we construct an efficient machine-learned interatomic potential for the MoNbTaVW quinary system. This potential achieves computational speeds comparable to the embedded-atom method (EAM) potenti… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02834v2-abstract-full').style.display = 'inline'; document.getElementById('2411.02834v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02834v2-abstract-full" style="display: none;"> High-entropy alloys (HEAs) based on tungsten (W) have emerged as promising candidates for plasma-facing components in future fusion reactors, owing to their excellent irradiation resistance. In this study, we construct an efficient machine-learned interatomic potential for the MoNbTaVW quinary system. This potential achieves computational speeds comparable to the embedded-atom method (EAM) potential, allowing us to conduct a comprehensive investigation of the primary radiation damage through molecular dynamics simulations. Threshold displacement energies (TDEs) in the MoNbTaVW HEA are investigated and compared with pure metals. A series of displacement cascade simulations at primary knock-on atom energies ranging from 10 to 150 keV reveal significant differences in defect generation and clustering between MoNbTaVW HEA and pure W. In HEAs, we observe more surviving Frenkel pairs (FPs) but fewer and smaller interstitial clusters compared to W, indicating superior radiation tolerance. We propose extended damage models to quantify the radiation dose in the MoNbTaVW HEA, and suggest that one reason for their enhanced resistance is subcascade splitting, which reduces the formation of interstitial clusters. 
Our findings provide critical insights into the fundamental irradiation resistance mechanisms in refractory body-centered cubic alloys, offering guidance for the design of future radiation-tolerant materials. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02834v2-abstract-full').style.display = 'none'; document.getElementById('2411.02834v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15521">arXiv:2410.15521</a> <span> [<a href="https://arxiv.org/pdf/2410.15521">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> </div> <p class="title is-5 mathjax"> Lying mirror </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuhang Li</a>, <a href="/search/physics?searchtype=author&query=Chen%2C+S">Shiqi Chen</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15521v1-abstract-short" style="display: inline;"> We introduce an all-optical system, termed the "lying mirror", to hide input information by transforming it into misleading, ordinary-looking patterns that effectively camouflage the underlying image data and deceive the observers. This misleading transformation is achieved through passive light-matter interactions of the incident light with an optimized structured diffractive surface, enabling th… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15521v1-abstract-full').style.display = 'inline'; document.getElementById('2410.15521v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15521v1-abstract-full" style="display: none;"> We introduce an all-optical system, termed the "lying mirror", to hide input information by transforming it into misleading, ordinary-looking patterns that effectively camouflage the underlying image data and deceive the observers. This misleading transformation is achieved through passive light-matter interactions of the incident light with an optimized structured diffractive surface, enabling the optical concealment of any form of secret input data without any digital computing. These lying mirror designs were shown to camouflage different types of input image data, exhibiting robustness against a range of adversarial manipulations, including random image noise as well as unknown, random rotations, shifts, and scaling of the object features. 
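The "extended damage models" are not spelled out in the abstract; the usual baseline such models build on is the NRT displacement estimate, sketched below. The threshold displacement energy used here is a placeholder of the right order for W-based metals, not a value from the paper.

```python
def nrt_displacements(T_dam_eV: float, E_d_eV: float = 90.0) -> float:
    """Standard NRT estimate of stable displacements produced by a cascade
    with damage energy T_dam (energy remaining after electronic losses).
    E_d is an assumed average threshold displacement energy."""
    if T_dam_eV < E_d_eV:
        return 0.0
    if T_dam_eV < 2.0 * E_d_eV / 0.8:
        return 1.0
    return 0.8 * T_dam_eV / (2.0 * E_d_eV)

# e.g. a cascade with 150 keV of damage energy:
print(nrt_displacements(150e3))  # about 667 NRT displacements
```

Comparing counts of surviving Frenkel pairs from molecular dynamics against such a reference is one common way to express cascade efficiency and radiation dose in dpa.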
3. arXiv:2410.15521 [pdf] | physics.optics (Optics), cs.CV (Computer Vision and Pattern Recognition), physics.app-ph (Applied Physics)
Title: Lying mirror
Authors: Yuhang Li, Shiqi Chen, Bijie Bai, Aydogan Ozcan
Abstract: We introduce an all-optical system, termed the "lying mirror", to hide input information by transforming it into misleading, ordinary-looking patterns that effectively camouflage the underlying image data and deceive the observers. This misleading transformation is achieved through passive light-matter interactions of the incident light with an optimized structured diffractive surface, enabling the optical concealment of any form of secret input data without any digital computing. These lying mirror designs were shown to camouflage different types of input image data, exhibiting robustness against a range of adversarial manipulations, including random image noise as well as unknown, random rotations, shifts, and scaling of the object features. The feasibility of the lying mirror concept was also validated experimentally using a structured micro-mirror array along with multi-wavelength illumination at 480, 550 and 600 nm, covering the blue, green and red image channels. This framework showcases the power of structured diffractive surfaces for visual information processing and might find various applications in defense, security and entertainment.
Submitted 20 October, 2024; originally announced October 2024.
Comments: 21 Pages, 8 Figures

4. arXiv:2404.00837 [pdf] | eess.IV (Image and Video Processing), cs.CV (Computer Vision and Pattern Recognition), cs.LG (Machine Learning), physics.med-ph (Medical Physics)
DOI: 10.34133/bmef.0048
Title: Automated HER2 Scoring in Breast Cancer Images Using Deep Learning and Pyramid Sampling
Authors: Sahan Yoruc Selcuk, Xilin Yang, Bijie Bai, Yijie Zhang, Yuzhu Li, Musa Aydin, Aras Firat Unal, Aditya Gomatam, Zhen Guo, Darrow Morgan Angus, Goren Kolodney, Karine Atlan, Tal Keidar Haran, Nir Pillar, Aydogan Ozcan
Abstract: Human epidermal growth factor receptor 2 (HER2) is a critical protein in cancer cell growth that signifies the aggressiveness of breast cancer (BC) and helps predict its prognosis. Accurate assessment of immunohistochemically (IHC) stained tissue slides for HER2 expression levels is essential for both treatment guidance and understanding of cancer mechanisms. Nevertheless, the traditional workflow of manual examination by board-certified pathologists encounters challenges, including inter- and intra-observer inconsistency and extended turnaround times. Here, we introduce a deep learning-based approach utilizing pyramid sampling for the automated classification of HER2 status in IHC-stained BC tissue images. Our approach analyzes morphological features at various spatial scales, efficiently managing the computational load and facilitating a detailed examination of cellular and larger-scale tissue-level details. This method addresses the tissue heterogeneity of HER2 expression by providing a comprehensive view, leading to a blind testing classification accuracy of 84.70%, on a dataset of 523 core images from tissue microarrays. Our automated system, proving reliable as an adjunct pathology tool, has the potential to enhance diagnostic precision and evaluation speed, and might significantly impact cancer treatment planning.
Submitted 31 March, 2024; originally announced April 2024.
Comments: 21 Pages, 7 Figures
Journal ref: BME Frontiers (2024)
5. arXiv:2403.09100 [pdf] | physics.med-ph (Medical Physics), cs.CV (Computer Vision and Pattern Recognition), cs.LG (Machine Learning), eess.IV (Image and Video Processing), physics.optics (Optics)
DOI: 10.1038/s41467-024-52263-z
Title: Virtual birefringence imaging and histological staining of amyloid deposits in label-free tissue using autofluorescence microscopy and deep learning
Authors: Xilin Yang, Bijie Bai, Yijie Zhang, Musa Aydin, Sahan Yoruc Selcuk, Zhen Guo, Gregory A. Fishbein, Karine Atlan, William Dean Wallace, Nir Pillar, Aydogan Ozcan
Abstract: Systemic amyloidosis is a group of diseases characterized by the deposition of misfolded proteins in various organs and tissues, leading to progressive organ dysfunction and failure. Congo red stain is the gold standard chemical stain for the visualization of amyloid deposits in tissue sections, as it forms complexes with the misfolded proteins and shows a birefringence pattern under polarized light microscopy. However, Congo red staining is tedious and costly to perform, and prone to false diagnoses due to variations in the amount of amyloid, staining quality and expert interpretation through manual examination of tissue under a polarization microscope. Here, we report the first demonstration of virtual birefringence imaging and virtual Congo red staining of label-free human tissue to show that a single trained neural network can rapidly transform autofluorescence images of label-free tissue sections into brightfield and polarized light microscopy equivalent images, matching the histochemically stained versions of the same samples. We demonstrate the efficacy of our method with blind testing and pathologist evaluations on cardiac tissue where the virtually stained images agreed well with the histochemically stained ground truth images. Our virtually stained polarization and brightfield images highlight amyloid birefringence patterns in a consistent, reproducible manner while mitigating diagnostic challenges due to variations in the quality of chemical staining and manual imaging processes as part of the clinical workflow.
Submitted 14 March, 2024; originally announced March 2024.
Comments: 20 Pages, 5 Figures
Journal ref: Nature Communications (2024)

6. arXiv:2402.02397 [pdf] | physics.optics (Optics), cs.CV (Computer Vision and Pattern Recognition), cs.NE (Neural and Evolutionary Computing)
DOI: 10.1002/lpor.202400238
Title: Multiplexed all-optical permutation operations using a reconfigurable diffractive optical network
Authors: Guangdong Ma, Xilin Yang, Bijie Bai, Jingxi Li, Yuhang Li, Tianyi Gan, Che-Yung Shen, Yijie Zhang, Yuzhu Li, Mona Jarrahi, Aydogan Ozcan
Abstract: Large-scale and high-dimensional permutation operations are important for various applications in e.g., telecommunications and encryption. Here, we demonstrate the use of all-optical diffractive computing to execute a set of high-dimensional permutation operations between an input and output field-of-view through layer rotations in a diffractive optical network. In this reconfigurable multiplexed material designed by deep learning, every diffractive layer has four orientations: 0, 90, 180, and 270 degrees. Each unique combination of these rotatable layers represents a distinct rotation state of the diffractive design tailored for a specific permutation operation. Therefore, a K-layer rotatable diffractive material is capable of all-optically performing up to 4^K independent permutation operations. The original input information can be decrypted by applying the specific inverse permutation matrix to output patterns, while applying other inverse operations will lead to loss of information. We demonstrated the feasibility of this reconfigurable multiplexed diffractive design by approximating 256 randomly selected permutation matrices using K=4 rotatable diffractive layers. We also experimentally validated this reconfigurable diffractive network using terahertz radiation and 3D-printed diffractive layers, providing a decent match to our numerical results. The presented rotation-multiplexed diffractive processor design is particularly useful due to its mechanical reconfigurability, offering multifunctional representation through a single fabrication process.
Submitted 4 February, 2024; originally announced February 2024.
Comments: 37 Pages, 10 Figures
Journal ref: Laser & Photonics Reviews (2024)
7. arXiv:2401.08923 [pdf] | physics.optics (Optics), cs.CV (Computer Vision and Pattern Recognition), physics.app-ph (Applied Physics)
DOI: 10.1186/s43593-024-00067-5
Title: Subwavelength Imaging using a Solid-Immersion Diffractive Optical Processor
Authors: Jingtian Hu, Kun Liao, Niyazi Ulas Dinc, Carlo Gigli, Bijie Bai, Tianyi Gan, Xurong Li, Hanlong Chen, Xilin Yang, Yuhang Li, Cagatay Isil, Md Sadman Sakib Rahman, Jingxi Li, Xiaoyong Hu, Mona Jarrahi, Demetri Psaltis, Aydogan Ozcan
Abstract: Phase imaging is widely used in biomedical imaging, sensing, and material characterization, among other fields. However, direct imaging of phase objects with subwavelength resolution remains a challenge. Here, we demonstrate subwavelength imaging of phase and amplitude objects based on all-optical diffractive encoding and decoding. To resolve subwavelength features of an object, the diffractive imager uses a thin, high-index solid-immersion layer to transmit high-frequency information of the object to a spatially-optimized diffractive encoder, which converts/encodes high-frequency information of the input into low-frequency spatial modes for transmission through air. The subsequent diffractive decoder layers (in air) are jointly designed with the encoder using deep-learning-based optimization, and communicate with the encoder layer to create magnified images of input objects at its output, revealing subwavelength features that would otherwise be washed away due to the diffraction limit. We demonstrate that this all-optical collaboration between a diffractive solid-immersion encoder and the following decoder layers in air can resolve subwavelength phase and amplitude features of input objects in a highly compact design. To experimentally demonstrate its proof-of-concept, we used terahertz radiation and developed a fabrication method for creating monolithic multi-layer diffractive processors. Through these monolithically fabricated diffractive encoder-decoder pairs, we demonstrated phase-to-intensity transformations and all-optically reconstructed subwavelength phase features of input objects by directly transforming them into magnified intensity features at the output. This solid-immersion-based diffractive imager, with its compact and cost-effective design, can find wide-ranging applications in bioimaging, endoscopy, sensing and materials characterization.
Submitted 16 January, 2024; originally announced January 2024.
Comments: 32 Pages, 9 Figures
Journal ref: eLight (2024)

8. arXiv:2401.07856 [pdf] | physics.optics (Optics), cs.CV (Computer Vision and Pattern Recognition), physics.app-ph (Applied Physics)
DOI: 10.1126/sciadv.adn9420
Title: Information hiding cameras: optical concealment of object information into ordinary images
Authors: Bijie Bai, Ryan Lee, Yuhang Li, Tianyi Gan, Yuntian Wang, Mona Jarrahi, Aydogan Ozcan
Abstract: Data protection methods like cryptography, despite being effective, inadvertently signal the presence of secret communication, thereby drawing undue attention. Here, we introduce an optical information hiding camera integrated with an electronic decoder, optimized jointly through deep learning. This information hiding-decoding system employs a diffractive optical processor as its front-end, which transforms and hides input images in the form of ordinary-looking patterns that deceive/mislead human observers. This information hiding transformation is valid for infinitely many combinations of secret messages, all of which are transformed into ordinary-looking output patterns, achieved all-optically through passive light-matter interactions within the optical processor. By processing these ordinary-looking output images, a jointly-trained electronic decoder neural network accurately reconstructs the original information hidden within the deceptive output pattern. We numerically demonstrated our approach by designing an information hiding diffractive camera along with a jointly-optimized convolutional decoder neural network. The efficacy of this system was demonstrated under various lighting conditions and noise levels, showing its robustness. We further extended this information hiding camera to multi-spectral operation, allowing the concealment and decoding of multiple images at different wavelengths, all performed simultaneously in a single feed-forward operation. The feasibility of our framework was also demonstrated experimentally using THz radiation. This optical encoder-electronic decoder-based co-design provides a novel information hiding camera interface that is both high-speed and energy-efficient, offering an intriguing solution for visual information security.
Submitted 15 January, 2024; originally announced January 2024.
Comments: 26 Pages, 8 Figures
Journal ref: Science Advances (2024)

9. arXiv:2311.02927 [pdf] | eess.IV (Image and Video Processing), physics.bio-ph (Biological Physics)
Title: Auto-ICell: An Accessible and Cost-Effective Integrative Droplet Microfluidic System for Real-Time Single-Cell Morphological and Apoptotic Analysis
Authors: Yuanyuan Wei, Meiai Lin, Shanhang Luo, Syed Muhammad Tariq Abbasi, Liwei Tan, Guangyao Cheng, Bijie Bai, Yi-Ping Ho, Scott Wu Yuan, Ho-Pui Ho
Abstract: The Auto-ICell system, a novel and cost-effective integrated droplet microfluidic system, is introduced for real-time analysis of single-cell morphology and apoptosis. This system integrates a 3D-printed microfluidic chip with image analysis algorithms, enabling the generation of uniform droplet reactors and immediate image analysis. The system employs a color-based image analysis algorithm in the bright field for droplet content analysis. Meanwhile, in the fluorescence field, cell apoptosis is quantitatively measured through a combination of deep-learning-enabled multiple fluorescent channel analysis and a live/dead cell stain kit. Breast cancer cells are encapsulated within uniform droplets, with diameters ranging from 70 μm to 240 μm, generated at a high throughput of 1,500 droplets per minute. Real-time image analysis results are displayed within 2 seconds on a custom graphical user interface (GUI). The system provides an automatic calculation of the distribution and ratio of encapsulated dyes in the bright field, and in the fluorescent field, cell blebbing and cell circularity are observed and quantified respectively. The Auto-ICell system is non-invasive and provides online detection, offering a robust, time-efficient, user-friendly, and cost-effective solution for single-cell analysis. It significantly enhances the detection throughput of droplet single-cell analysis by reducing setup costs and improving operational performance. This study highlights the potential of the Auto-ICell system in advancing biological research and personalized disease treatment, with promising applications in cell culture, biochemical microreactors, drug carriers, cell-based assays, synthetic biology, and point-of-care diagnostics.
Submitted 6 November, 2023; originally announced November 2023.
Comments: 22 pages, 5 figures
Through simulations, we show that as the number of optimized diffractive features increases beyond a threshold dictated by the multiplication of the input and output space-bandwidth products, a spatially incoherent diffractive visual processor can approximate any complex-valued linear transformation and be used for all-optical image encryption using incoherent illumination. The findings are important for the all-optical processing of information under natural light using various forms of diffractive surface-based optical processors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.03384v1-abstract-full').style.display = 'none'; document.getElementById('2310.03384v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 Pages, 3 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Advanced Photonics Nexus (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.15019">arXiv:2308.15019</a> <span> [<a href="https://arxiv.org/pdf/2308.15019">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41377-024-01543-w">10.1038/s41377-024-01543-w <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Pyramid diffractive optical networks for unidirectional image magnification and demagnification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Yang%2C+X">Xilin Yang</a>, <a href="/search/physics?searchtype=author&query=Gan%2C+T">Tianyi Gan</a>, <a href="/search/physics?searchtype=author&query=Li%2C+J">Jingxi Li</a>, <a href="/search/physics?searchtype=author&query=Mengu%2C+D">Deniz Mengu</a>, <a href="/search/physics?searchtype=author&query=Jarrahi%2C+M">Mona Jarrahi</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.15019v2-abstract-short" style="display: inline;"> Diffractive deep neural networks (D2NNs) are composed of successive transmissive layers optimized using supervised deep learning to all-optically implement various computational tasks between 
an input and output field-of-view (FOV). Here, we present a pyramid-structured diffractive optical network design (which we term P-D2NN), optimized specifically for unidirectional image magnification and dema… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.15019v2-abstract-full').style.display = 'inline'; document.getElementById('2308.15019v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.15019v2-abstract-full" style="display: none;"> Diffractive deep neural networks (D2NNs) are composed of successive transmissive layers optimized using supervised deep learning to all-optically implement various computational tasks between an input and output field-of-view (FOV). Here, we present a pyramid-structured diffractive optical network design (which we term P-D2NN), optimized specifically for unidirectional image magnification and demagnification. In this design, the diffractive layers are pyramidally scaled in alignment with the direction of the image magnification or demagnification. This P-D2NN design creates high-fidelity magnified or demagnified images in only one direction, while inhibiting the image formation in the opposite direction - achieving the desired unidirectional imaging operation using a much smaller number of diffractive degrees of freedom within the optical processor volume. Furthermore, P-D2NN design maintains its unidirectional image magnification/demagnification functionality across a large band of illumination wavelengths despite being trained with a single wavelength. We also designed a wavelength-multiplexed P-D2NN, where a unidirectional magnifier and a unidirectional demagnifier operate simultaneously in opposite directions, at two distinct illumination wavelengths. Furthermore, we demonstrate that by cascading multiple unidirectional P-D2NN modules, we can achieve higher magnification factors. The efficacy of the P-D2NN architecture was also validated experimentally using terahertz illumination, successfully matching our numerical simulations. P-D2NN offers a physics-inspired strategy for designing task-specific visual processors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.15019v2-abstract-full').style.display = 'none'; document.getElementById('2308.15019v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
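<p class="is-size-7">The P-D2NN abstract above describes diffractive layers that are pyramidally scaled along the direction of magnification or demagnification. The sketch below only illustrates that geometric idea by linearly tapering layer apertures between the input and output field-of-view sizes; the layer count, sizes, and linear taper are assumptions for illustration, not the published design.</p>
<pre><code class="language-python">
# Illustrative geometry only: layer apertures taper from the input FOV size toward the
# output FOV size, mimicking the "pyramidal" scaling described in the abstract.
# The number of layers and the linear taper are assumptions made for this sketch.
import numpy as np

def pyramid_layer_widths(input_fov_width, magnification, num_layers=5):
    """Return per-layer aperture widths for a unidirectional magnifier sketch."""
    output_fov_width = input_fov_width * magnification
    # Linear taper from the input-side aperture to the output-side aperture.
    return np.linspace(input_fov_width, output_fov_width, num_layers)

print(pyramid_layer_widths(input_fov_width=1.0, magnification=2.0))
</code></pre>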
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">41 Pages, 11 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Light: Science & Applications (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.03777">arXiv:2308.03777</a> <span> [<a href="https://arxiv.org/pdf/2308.03777">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Biological Physics">physics.bio-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Lab-in-a-Tube: A portable imaging spectrophotometer for cost-effective, high-throughput, and label-free analysis of centrifugation processes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Wei%2C+Y">Yuanyuan Wei</a>, <a href="/search/physics?searchtype=author&query=Hu%2C+D">Dehua Hu</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Meng%2C+C">Chenqi Meng</a>, <a href="/search/physics?searchtype=author&query=Chan%2C+T+K">Tsz Kin Chan</a>, <a href="/search/physics?searchtype=author&query=Zhao%2C+X">Xing Zhao</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+Y">Yuye Wang</a>, <a href="/search/physics?searchtype=author&query=Ho%2C+Y">Yi-Ping Ho</a>, <a href="/search/physics?searchtype=author&query=Yuan%2C+W">Wu Yuan</a>, <a href="/search/physics?searchtype=author&query=Ho%2C+H">Ho-Pui Ho</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.03777v1-abstract-short" style="display: inline;"> Centrifuges serve as essential instruments in modern experimental sciences, facilitating a wide range of routine sample processing tasks that necessitate material sedimentation. However, the study for real time observation of the dynamical process during centrifugation has remained elusive. In this study, we developed an innovative Lab_in_a_Tube imaging spectrophotometer that incorporates capabili… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.03777v1-abstract-full').style.display = 'inline'; document.getElementById('2308.03777v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.03777v1-abstract-full" style="display: none;"> Centrifuges serve as essential instruments in modern experimental sciences, facilitating a wide range of routine sample processing tasks that necessitate material sedimentation. However, the study for real time observation of the dynamical process during centrifugation has remained elusive. In this study, we developed an innovative Lab_in_a_Tube imaging spectrophotometer that incorporates capabilities of real time image analysis and programmable interruption. This portable LIAT device costs less than 30 US dollars. 
Based on our knowledge, it is the first Wi Fi camera built_in in common lab centrifuges with active closed_loop control. We tested our LIAT imaging spectrophotometer with solute solvent interaction investigation obtained from lab centrifuges with quantitative data plotting in a real time manner. Single re circulating flow was real time observed, forming the ring shaped pattern during centrifugation. To the best of our knowledge, this is the very first observation of similar phenomena. We developed theoretical simulations for the single particle in a rotating reference frame, which correlated well with experimental results. We also demonstrated the first demonstration to visualize the blood sedimentation process in clinical lab centrifuges. This remarkable cost effectiveness opens up exciting opportunities for centrifugation microbiology research and paves the way for the creation of a network of computational imaging spectrometers at an affordable price for large scale and continuous monitoring of centrifugal processes in general. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.03777v1-abstract-full').style.display = 'none'; document.getElementById('2308.03777v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 Pages, 6 Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.15845">arXiv:2306.15845</a> <span> [<a href="https://arxiv.org/pdf/2306.15845">pdf</a>, <a href="https://arxiv.org/format/2306.15845">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Integrated Photonic Reservoir Computing with All-Optical Readout </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Ma%2C+C">Chonghuai Ma</a>, <a href="/search/physics?searchtype=author&query=Van+Kerrebrouck%2C+J">Joris Van Kerrebrouck</a>, <a href="/search/physics?searchtype=author&query=Deng%2C+H">Hong Deng</a>, <a href="/search/physics?searchtype=author&query=Sackesyn%2C+S">Stijn Sackesyn</a>, <a href="/search/physics?searchtype=author&query=Gooskens%2C+E">Emmanuel Gooskens</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bing Bai</a>, <a href="/search/physics?searchtype=author&query=Dambre%2C+J">Joni Dambre</a>, <a href="/search/physics?searchtype=author&query=Bienstman%2C+P">Peter Bienstman</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.15845v1-abstract-short" style="display: inline;"> Integrated photonic reservoir computing has been demonstrated to be able to tackle different problems because of its neural network nature. 
A key advantage of photonic reservoir computing over other neuromorphic paradigms is its straightforward readout system, which facilitates both rapid training and robust, fabrication variation-insensitive photonic integrated hardware implementation for real-ti… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.15845v1-abstract-full').style.display = 'inline'; document.getElementById('2306.15845v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.15845v1-abstract-full" style="display: none;"> Integrated photonic reservoir computing has been demonstrated to be able to tackle different problems because of its neural network nature. A key advantage of photonic reservoir computing over other neuromorphic paradigms is its straightforward readout system, which facilitates both rapid training and robust, fabrication variation-insensitive photonic integrated hardware implementation for real-time processing. We present our recent development of a fully-optical, coherent photonic reservoir chip integrated with an optical readout system, capitalizing on these benefits. Alongside the integrated system, we also demonstrate a weight update strategy that is suitable for the integrated optical readout hardware. Using this online training scheme, we successfully solved 3-bit header recognition and delayed XOR tasks at 20 Gbps in real-time, all within the optical domain without excess delays. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.15845v1-abstract-full').style.display = 'none'; document.getElementById('2306.15845v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. 
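<p class="is-size-7">The reservoir-computing abstract above mentions solving a delayed XOR task with a trained readout. The sketch below is a generic software illustration of that benchmark, training a linear readout by ridge regression on simulated nonlinear reservoir states; the actual chip trains an integrated optical readout online, which this offline sketch does not reproduce.</p>
<pre><code class="language-python">
# Generic illustration of the delayed-XOR benchmark with a linear readout trained by
# ridge regression on simulated reservoir states. The integrated photonic system in the
# abstract trains its optical readout online; this offline sketch is only for intuition.
import numpy as np

rng = np.random.default_rng(0)
bits = rng.integers(0, 2, size=2000)            # input bit stream
target = bits[1:] ^ bits[:-1]                   # XOR of current and previous bit (delay 1)

# Toy "reservoir": a fixed random nonlinear expansion of a short input history.
window = np.stack([bits[1:], bits[:-1]])        # shape (2, N-1): current + delayed bit
W_in = rng.normal(size=(50, 2))
states = np.tanh(W_in @ window)                 # reservoir state matrix, shape (50, N-1)

# Ridge-regression readout (closed form).
lam = 1e-3
A = states @ states.T + lam * np.eye(50)
w = np.linalg.solve(A, states @ target)
pred = np.rint(w @ states)
print("training accuracy:", np.mean(pred == target))
</code></pre>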
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.13298">arXiv:2304.13298</a> <span> [<a href="https://arxiv.org/pdf/2304.13298">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41467-024-46387-5">10.1038/s41467-024-46387-5 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Broadband nonlinear modulation of incoherent light using a transparent optoelectronic neuron array </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Zhang%2C+D">Dehui Zhang</a>, <a href="/search/physics?searchtype=author&query=Xu%2C+D">Dong Xu</a>, <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuhang Li</a>, <a href="/search/physics?searchtype=author&query=Luo%2C+Y">Yi Luo</a>, <a href="/search/physics?searchtype=author&query=Hu%2C+J">Jingtian Hu</a>, <a href="/search/physics?searchtype=author&query=Zhou%2C+J">Jingxuan Zhou</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yucheng Zhang</a>, <a href="/search/physics?searchtype=author&query=Zhou%2C+B">Boxuan Zhou</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+P">Peiqi Wang</a>, <a href="/search/physics?searchtype=author&query=Li%2C+X">Xurong Li</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Ren%2C+H">Huaying Ren</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+L">Laiyuan Wang</a>, <a href="/search/physics?searchtype=author&query=Jarrahi%2C+M">Mona Jarrahi</a>, <a href="/search/physics?searchtype=author&query=Huang%2C+Y">Yu Huang</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a>, <a href="/search/physics?searchtype=author&query=Duan%2C+X">Xiangfeng Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.13298v1-abstract-short" style="display: inline;"> Nonlinear optical processing of ambient natural light is highly desired in computational imaging and sensing applications. A strong optical nonlinear response that can work under weak broadband incoherent light is essential for this purpose. Here we introduce an optoelectronic nonlinear filter array that can address this emerging need. By merging 2D transparent phototransistors (TPTs) with liquid… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.13298v1-abstract-full').style.display = 'inline'; document.getElementById('2304.13298v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.13298v1-abstract-full" style="display: none;"> Nonlinear optical processing of ambient natural light is highly desired in computational imaging and sensing applications. A strong optical nonlinear response that can work under weak broadband incoherent light is essential for this purpose. 
Here we introduce an optoelectronic nonlinear filter array that can address this emerging need. By merging 2D transparent phototransistors (TPTs) with liquid crystal (LC) modulators, we create an optoelectronic neuron array that allows self-amplitude modulation of spatially incoherent light, achieving a large nonlinear contrast over a broad spectrum at orders-of-magnitude lower intensity than what is achievable in most optical nonlinear materials. For a proof-of-concept demonstration, we fabricated a 10,000-pixel array of optoelectronic neurons, each serving as a nonlinear filter, and experimentally demonstrated an intelligent imaging system that uses the nonlinear response to instantly reduce input glares while retaining the weaker-intensity objects within the field of view of a cellphone camera. This intelligent glare-reduction capability is important for various imaging applications, including autonomous driving, machine vision, and security cameras. Beyond imaging and sensing, this optoelectronic neuron array, with its rapid nonlinear modulation for processing incoherent broadband light, might also find applications in optical computing, where nonlinear activation functions that can work under ambient light conditions are highly sought. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.13298v1-abstract-full').style.display = 'none'; document.getElementById('2304.13298v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 Pages, 5 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nature Communications (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.17164">arXiv:2303.17164</a> <span> [<a href="https://arxiv.org/pdf/2303.17164">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1117/1.AP.5.4.046009">10.1117/1.AP.5.4.046009 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Optical information transfer through random unknown diffusers using electronic encoding and diffractive decoding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuhang Li</a>, <a href="/search/physics?searchtype=author&query=Gan%2C+T">Tianyi Gan</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Isil%2C+C">Cagatay Isil</a>, <a href="/search/physics?searchtype=author&query=Jarrahi%2C+M">Mona Jarrahi</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p 
class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.17164v1-abstract-short" style="display: inline;"> Free-space optical information transfer through diffusive media is critical in many applications, such as biomedical devices and optical communication, but remains challenging due to random, unknown perturbations in the optical path. In this work, we demonstrate an optical diffractive decoder with electronic encoding to accurately transfer the optical information of interest, corresponding to, e.g… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.17164v1-abstract-full').style.display = 'inline'; document.getElementById('2303.17164v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.17164v1-abstract-full" style="display: none;"> Free-space optical information transfer through diffusive media is critical in many applications, such as biomedical devices and optical communication, but remains challenging due to random, unknown perturbations in the optical path. In this work, we demonstrate an optical diffractive decoder with electronic encoding to accurately transfer the optical information of interest, corresponding to, e.g., any arbitrary input object or message, through unknown random phase diffusers along the optical path. This hybrid electronic-optical model, trained using supervised learning, comprises a convolutional neural network (CNN) based electronic encoder and successive passive diffractive layers that are jointly optimized. After their joint training using deep learning, our hybrid model can transfer optical information through unknown phase diffusers, demonstrating generalization to new random diffusers never seen before. The resulting electronic-encoder and the optical-decoder model were experimentally validated using a 3D-printed diffractive network that axially spans less than 70 x lambda, where lambda = 0.75 mm is the illumination wavelength in the terahertz spectrum, carrying the desired optical information through random unknown diffusers. The presented framework can be physically scaled to operate at different parts of the electromagnetic spectrum, without retraining its components, and would offer low-power and compact solutions for optical information transfer in free space through unknown random diffusive media. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.17164v1-abstract-full').style.display = 'none'; document.getElementById('2303.17164v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">32 Pages, 9 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Advanced Photonics (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.13037">arXiv:2303.13037</a> <span> [<a href="https://arxiv.org/pdf/2303.13037">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41377-023-01234-y">10.1038/s41377-023-01234-y <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Universal Linear Intensity Transformations Using Spatially-Incoherent Diffractive Processors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Rahman%2C+M+S+S">Md Sadman Sakib Rahman</a>, <a href="/search/physics?searchtype=author&query=Yang%2C+X">Xilin Yang</a>, <a href="/search/physics?searchtype=author&query=Li%2C+J">Jingxi Li</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.13037v1-abstract-short" style="display: inline;"> Under spatially-coherent light, a diffractive optical network composed of structured surfaces can be designed to perform any arbitrary complex-valued linear transformation between its input and output fields-of-view (FOVs) if the total number (N) of optimizable phase-only diffractive features is greater than or equal to ~2 Ni x No, where Ni and No refer to the number of useful pixels at the input… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.13037v1-abstract-full').style.display = 'inline'; document.getElementById('2303.13037v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.13037v1-abstract-full" style="display: none;"> Under spatially-coherent light, a diffractive optical network composed of structured surfaces can be designed to perform any arbitrary complex-valued linear transformation between its input and output fields-of-view (FOVs) if the total number (N) of optimizable phase-only diffractive features is greater than or equal to ~2 Ni x No, where Ni and No refer to the number of useful pixels at the input and the output FOVs, respectively. Here we report the design of a spatially-incoherent diffractive optical processor that can approximate any arbitrary linear transformation in time-averaged intensity between its input and output FOVs. 
Under spatially-incoherent monochromatic light, the spatially-varying intensity point spread function, H, of a diffractive network, corresponding to a given, arbitrarily-selected linear intensity transformation, can be written as H(m,n;m',n')=|h(m,n;m',n')|^2, where h is the spatially-coherent point-spread function of the same diffractive network, and (m,n) and (m',n') define the coordinates of the output and input FOVs, respectively. Using deep learning, supervised through examples of input-output profiles, we numerically demonstrate that a spatially-incoherent diffractive network can be trained to all-optically perform any arbitrary linear intensity transformation between its input and output if N is greater than or equal to ~2 Ni x No. These results constitute the first demonstration of universal linear intensity transformations performed on an input FOV under spatially-incoherent illumination and will be useful for designing all-optical visual processors that can work with incoherent, natural light. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.13037v1-abstract-full').style.display = 'none'; document.getElementById('2303.13037v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">29 Pages, 10 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Light: Science & Applications (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.03793">arXiv:2303.03793</a> <span> [<a href="https://arxiv.org/pdf/2303.03793">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biological Physics">physics.bio-ph</span> </div> </div> <p class="title is-5 mathjax"> Roadmap on Deep Learning for Microscopy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Volpe%2C+G">Giovanni Volpe</a>, <a href="/search/physics?searchtype=author&query=W%C3%A4hlby%2C+C">Carolina Wählby</a>, <a href="/search/physics?searchtype=author&query=Tian%2C+L">Lei Tian</a>, <a href="/search/physics?searchtype=author&query=Hecht%2C+M">Michael Hecht</a>, <a href="/search/physics?searchtype=author&query=Yakimovich%2C+A">Artur Yakimovich</a>, <a href="/search/physics?searchtype=author&query=Monakhova%2C+K">Kristina Monakhova</a>, <a href="/search/physics?searchtype=author&query=Waller%2C+L">Laura Waller</a>, <a href="/search/physics?searchtype=author&query=Sbalzarini%2C+I+F">Ivo F. Sbalzarini</a>, <a href="/search/physics?searchtype=author&query=Metzler%2C+C+A">Christopher A.
Metzler</a>, <a href="/search/physics?searchtype=author&query=Xie%2C+M">Mingyang Xie</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+K">Kevin Zhang</a>, <a href="/search/physics?searchtype=author&query=Lenton%2C+I+C+D">Isaac C. D. Lenton</a>, <a href="/search/physics?searchtype=author&query=Rubinsztein-Dunlop%2C+H">Halina Rubinsztein-Dunlop</a>, <a href="/search/physics?searchtype=author&query=Brunner%2C+D">Daniel Brunner</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a>, <a href="/search/physics?searchtype=author&query=Midtvedt%2C+D">Daniel Midtvedt</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+H">Hao Wang</a>, <a href="/search/physics?searchtype=author&query=Sladoje%2C+N">Nata拧a Sladoje</a>, <a href="/search/physics?searchtype=author&query=Lindblad%2C+J">Joakim Lindblad</a>, <a href="/search/physics?searchtype=author&query=Smith%2C+J+T">Jason T. Smith</a>, <a href="/search/physics?searchtype=author&query=Ochoa%2C+M">Marien Ochoa</a>, <a href="/search/physics?searchtype=author&query=Barroso%2C+M">Margarida Barroso</a>, <a href="/search/physics?searchtype=author&query=Intes%2C+X">Xavier Intes</a>, <a href="/search/physics?searchtype=author&query=Qiu%2C+T">Tong Qiu</a> , et al. (50 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.03793v1-abstract-short" style="display: inline;"> Through digital imaging, microscopy has evolved from primarily being a means for visual observation of life at the micro- and nano-scale, to a quantitative tool with ever-increasing resolution and throughput. Artificial intelligence, deep neural networks, and machine learning are all niche terms describing computational methods that have gained a pivotal role in microscopy-based research over the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.03793v1-abstract-full').style.display = 'inline'; document.getElementById('2303.03793v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.03793v1-abstract-full" style="display: none;"> Through digital imaging, microscopy has evolved from primarily being a means for visual observation of life at the micro- and nano-scale, to a quantitative tool with ever-increasing resolution and throughput. Artificial intelligence, deep neural networks, and machine learning are all niche terms describing computational methods that have gained a pivotal role in microscopy-based research over the past decade. This Roadmap is written collectively by prominent researchers and encompasses selected aspects of how machine learning is applied to microscopy image data, with the aim of gaining scientific knowledge by improved image quality, automated detection, segmentation, classification and tracking of objects, and efficient merging of information from multiple imaging modalities. We aim to give the reader an overview of the key developments and an understanding of possibilities and limitations of machine learning for microscopy. It will be of interest to a wide cross-disciplinary audience in the physical sciences and life sciences. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.03793v1-abstract-full').style.display = 'none'; document.getElementById('2303.03793v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.03652">arXiv:2302.03652</a> <span> [<a href="https://arxiv.org/pdf/2302.03652">pdf</a>, <a href="https://arxiv.org/format/2302.03652">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> </div> <p class="title is-5 mathjax"> Slow light silicon modulator beyond 110 GHz bandwidth </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Han%2C+C">Changhao Han</a>, <a href="/search/physics?searchtype=author&query=Zheng%2C+Z">Zhao Zheng</a>, <a href="/search/physics?searchtype=author&query=Shu%2C+H">Haowen Shu</a>, <a href="/search/physics?searchtype=author&query=Jin%2C+M">Ming Jin</a>, <a href="/search/physics?searchtype=author&query=Qin%2C+J">Jun Qin</a>, <a href="/search/physics?searchtype=author&query=Chen%2C+R">Ruixuan Chen</a>, <a href="/search/physics?searchtype=author&query=Tao%2C+Y">Yuansheng Tao</a>, <a href="/search/physics?searchtype=author&query=Shen%2C+B">Bitao Shen</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bowen Bai</a>, <a href="/search/physics?searchtype=author&query=Yang%2C+F">Fenghe Yang</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+Y">Yimeng Wang</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+H">Haoyu Wang</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+F">Feifan Wang</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Z">Zixuan Zhang</a>, <a href="/search/physics?searchtype=author&query=Yu%2C+S">Shaohua Yu</a>, <a href="/search/physics?searchtype=author&query=Peng%2C+C">Chao Peng</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+X">Xingjun Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.03652v1-abstract-short" style="display: inline;"> Silicon modulators are key components in silicon photonics to support the dense integration of electro-optic (EO) functional elements on a compact chip for various applications including high-speed data transmission, signal processing, and photonic computing. 
Despite numerous advances in promoting the operation speed of silicon modulators, a bandwidth ceiling of 67 GHz emerges in practice and bec… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.03652v1-abstract-full').style.display = 'inline'; document.getElementById('2302.03652v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.03652v1-abstract-full" style="display: none;"> Silicon modulators are key components in silicon photonics to support the dense integration of electro-optic (EO) functional elements on a compact chip for various applications including high-speed data transmission, signal processing, and photonic computing. Despite numerous advances in promoting the operation speed of silicon modulators, a bandwidth ceiling of 67 GHz emerges in practice and becomes an obstacle to paving silicon photonics toward Tbps level data throughput on a single chip. Here, we theoretically propose and experimentally demonstrate a design strategy for silicon modulators by employing the slow light effect, which shatters the present bandwidth ceiling of silicon modulators and pushes its limit beyond 110 GHz in a small footprint. The proposed silicon modulator is built on a coupled-resonator optical waveguide (CROW) architecture, in which a set of Bragg gratings are appropriately cascaded to give rise to a slow light effect. By comprehensively balancing a series of merits including the group index, photon lifetime, electrical bandwidth, and losses, we found that the modulators can benefit from slow light for better modulation efficiency and compact size while keeping their bandwidth sufficiently high to support ultra-high-speed data transmission. Consequently, we realize a modulator with an EO bandwidth of 110 GHz in a length of 124 μm, and demonstrate a data rate beyond 110 Gbps by applying simple on-off keying modulation for DSP-free operation. Our work proves that silicon modulators beyond 110 GHz are feasible, thus shedding light on the potential of silicon photonics in ultra-high-bandwidth applications such as data communication, optical interconnection, and photonic machine learning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.03652v1-abstract-full').style.display = 'none'; document.getElementById('2302.03652v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.07908">arXiv:2301.07908</a> <span> [<a href="https://arxiv.org/pdf/2301.07908">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.37188/lam.2023.017">10.37188/lam.2023.017 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Quantitative phase imaging (QPI) through random diffusers using a diffractive optical network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuhang Li</a>, <a href="/search/physics?searchtype=author&query=Luo%2C+Y">Yi Luo</a>, <a href="/search/physics?searchtype=author&query=Mengu%2C+D">Deniz Mengu</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.07908v1-abstract-short" style="display: inline;"> Quantitative phase imaging (QPI) is a label-free computational imaging technique used in various fields, including biology and medical research. Modern QPI systems typically rely on digital processing using iterative algorithms for phase retrieval and image reconstruction. Here, we report a diffractive optical network trained to convert the phase information of input objects positioned behind rand… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.07908v1-abstract-full').style.display = 'inline'; document.getElementById('2301.07908v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.07908v1-abstract-full" style="display: none;"> Quantitative phase imaging (QPI) is a label-free computational imaging technique used in various fields, including biology and medical research. Modern QPI systems typically rely on digital processing using iterative algorithms for phase retrieval and image reconstruction. Here, we report a diffractive optical network trained to convert the phase information of input objects positioned behind random diffusers into intensity variations at the output plane, all-optically performing phase recovery and quantitative imaging of phase objects completely hidden by unknown, random phase diffusers. This QPI diffractive network is composed of successive diffractive layers, axially spanning in total ~70 wavelengths; unlike existing digital image reconstruction and phase retrieval methods, it forms an all-optical processor that does not require external power beyond the illumination beam to complete its QPI reconstruction at the speed of light propagation. 
This all-optical diffractive processor can provide a low-power, high frame rate and compact alternative for quantitative imaging of phase objects through random, unknown diffusers and can operate at different parts of the electromagnetic spectrum for various applications in biomedical imaging and sensing. The presented QPI diffractive designs can be integrated onto the active area of standard CCD/CMOS-based image sensors to convert an existing optical microscope into a diffractive QPI microscope, performing phase recovery and image reconstruction on a chip through light diffraction within passive structured layers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.07908v1-abstract-full').style.display = 'none'; document.getElementById('2301.07908v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 Pages, 7 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Light: Advanced Manufacturing (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.12873">arXiv:2212.12873</a> <span> [<a href="https://arxiv.org/pdf/2212.12873">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1002/adma.202212091">10.1002/adma.202212091 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Data class-specific all-optical transformations and encryption </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Wei%2C+H">Heming Wei</a>, <a href="/search/physics?searchtype=author&query=Yang%2C+X">Xilin Yang</a>, <a href="/search/physics?searchtype=author&query=Mengu%2C+D">Deniz Mengu</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.12873v1-abstract-short" style="display: inline;"> Diffractive optical networks provide rich opportunities for visual computing tasks since the spatial information of a scene can be directly accessed by a diffractive processor without requiring any digital pre-processing steps. Here we present data class-specific transformations all-optically performed between the input and output fields-of-view (FOVs) of a diffractive network. 
The visual informat… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.12873v1-abstract-full').style.display = 'inline'; document.getElementById('2212.12873v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.12873v1-abstract-full" style="display: none;"> Diffractive optical networks provide rich opportunities for visual computing tasks since the spatial information of a scene can be directly accessed by a diffractive processor without requiring any digital pre-processing steps. Here we present data class-specific transformations all-optically performed between the input and output fields-of-view (FOVs) of a diffractive network. The visual information of the objects is encoded into the amplitude (A), phase (P), or intensity (I) of the optical field at the input, which is all-optically processed by a data class-specific diffractive network. At the output, an image sensor-array directly measures the transformed patterns, all-optically encrypted using the transformation matrices pre-assigned to different data classes, i.e., a separate matrix for each data class. The original input images can be recovered by applying the correct decryption key (the inverse transformation) corresponding to the matching data class, while applying any other key will lead to loss of information. The class-specificity of these all-optical diffractive transformations creates opportunities where different keys can be distributed to different users; each user can only decode the acquired images of only one data class, serving multiple users in an all-optically encrypted manner. We numerically demonstrated all-optical class-specific transformations covering A-->A, I-->I, and P-->I transformations using various image datasets. We also experimentally validated the feasibility of this framework by fabricating a class-specific I-->I transformation diffractive network using two-photon polymerization and successfully tested it at 1550 nm wavelength. Data class-specific all-optical transformations provide a fast and energy-efficient method for image and data encryption, enhancing data security and privacy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.12873v1-abstract-full').style.display = 'none'; document.getElementById('2212.12873v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. 
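<p class="is-size-7">The abstract above describes encrypting images with class-specific linear transformations that can only be undone with the matching inverse. The toy sketch below shows that underlying linear-algebra idea on flattened images, assigning a random invertible matrix to each hypothetical class; it is purely illustrative and says nothing about the diffractive, all-optical implementation.</p>
<pre><code class="language-python">
# Toy illustration of class-specific linear-transformation "encryption": each data class
# gets its own invertible matrix, and only the matching inverse recovers the image. This is
# plain linear algebra, not the diffractive/optical implementation described in the abstract.
import numpy as np

rng = np.random.default_rng(2)
n_pixels = 64                                   # e.g. an 8x8 image, flattened

# One random, well-conditioned transformation matrix per (hypothetical) data class.
keys = {c: rng.normal(size=(n_pixels, n_pixels)) + n_pixels * np.eye(n_pixels)
        for c in ("class_A", "class_B")}

image = rng.random(n_pixels)                    # stand-in input image (flattened)
encrypted = keys["class_A"] @ image             # the "transformation", here just a matmul

recovered_good = np.linalg.solve(keys["class_A"], encrypted)   # correct decryption key
recovered_bad = np.linalg.solve(keys["class_B"], encrypted)    # wrong key: information lost

print("error with correct key:", np.linalg.norm(recovered_good - image))
print("error with wrong key:  ", np.linalg.norm(recovered_bad - image))
</code></pre>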
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 Pages, 9 Figures, 1 Table</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Advanced Materials (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.02025">arXiv:2212.02025</a> <span> [<a href="https://arxiv.org/pdf/2212.02025">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1126/sciadv.adg1505">10.1126/sciadv.adg1505 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Unidirectional Imaging using Deep Learning-Designed Materials </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Li%2C+J">Jingxi Li</a>, <a href="/search/physics?searchtype=author&query=Gan%2C+T">Tianyi Gan</a>, <a href="/search/physics?searchtype=author&query=Zhao%2C+Y">Yifan Zhao</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Shen%2C+C">Che-Yung Shen</a>, <a href="/search/physics?searchtype=author&query=Sun%2C+S">Songyu Sun</a>, <a href="/search/physics?searchtype=author&query=Jarrahi%2C+M">Mona Jarrahi</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.02025v1-abstract-short" style="display: inline;"> A unidirectional imager would only permit image formation along one direction, from an input field-of-view (FOV) A to an output FOV B, and in the reverse path, the image formation would be blocked. Here, we report the first demonstration of unidirectional imagers, presenting polarization-insensitive and broadband unidirectional imaging based on successive diffractive layers that are linear and iso… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.02025v1-abstract-full').style.display = 'inline'; document.getElementById('2212.02025v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.02025v1-abstract-full" style="display: none;"> A unidirectional imager would only permit image formation along one direction, from an input field-of-view (FOV) A to an output FOV B, and in the reverse path, the image formation would be blocked. Here, we report the first demonstration of unidirectional imagers, presenting polarization-insensitive and broadband unidirectional imaging based on successive diffractive layers that are linear and isotropic. 
These diffractive layers are optimized using deep learning and consist of hundreds of thousands of diffractive phase features, which collectively modulate the incoming fields and project an intensity image of the input onto an output FOV, while blocking the image formation in the reverse direction. After their deep learning-based training, the resulting diffractive layers are fabricated to form a unidirectional imager. As a reciprocal device, the diffractive unidirectional imager has asymmetric mode processing capabilities in the forward and backward directions, where the optical modes from B to A are selectively guided/scattered to miss the output FOV, whereas for the forward direction such modal losses are minimized, yielding an ideal imaging system between the input and output FOVs. Although trained using monochromatic illumination, the diffractive unidirectional imager maintains its functionality over a large spectral band and works under broadband illumination. We experimentally validated this unidirectional imager using terahertz radiation, very well matching our numerical results. Using the same deep learning-based design strategy, we also created a wavelength-selective unidirectional imager, where two unidirectional imaging operations, in reverse directions, are multiplexed through different illumination wavelengths. Diffractive unidirectional imaging using structured materials will have numerous applications in e.g., security, defense, telecommunications and privacy protection. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.02025v1-abstract-full').style.display = 'none'; document.getElementById('2212.02025v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 Pages, 10 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Science Advances (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.06822">arXiv:2211.06822</a> <span> [<a href="https://arxiv.org/pdf/2211.06822">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41377-023-01104-7">10.1038/s41377-023-01104-7 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Deep Learning-enabled Virtual Histological Staining of Biological Samples </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Yang%2C+X">Xilin Yang</a>, <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuzhu Li</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yijie Zhang</a>, <a href="/search/physics?searchtype=author&query=Pillar%2C+N">Nir Pillar</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.06822v1-abstract-short" style="display: inline;"> Histological staining is the gold standard for tissue examination in clinical pathology and life-science research, which visualizes the tissue and cellular structures using chromatic dyes or fluorescence labels to aid the microscopic assessment of tissue. However, the current histological staining workflow requires tedious sample preparation steps, specialized laboratory infrastructure, and traine… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.06822v1-abstract-full').style.display = 'inline'; document.getElementById('2211.06822v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.06822v1-abstract-full" style="display: none;"> Histological staining is the gold standard for tissue examination in clinical pathology and life-science research, which visualizes the tissue and cellular structures using chromatic dyes or fluorescence labels to aid the microscopic assessment of tissue. However, the current histological staining workflow requires tedious sample preparation steps, specialized laboratory infrastructure, and trained histotechnologists, making it expensive, time-consuming, and not accessible in resource-limited settings. 
Deep learning techniques created new opportunities to revolutionize staining methods by digitally generating histological stains using trained neural networks, providing rapid, cost-effective, and accurate alternatives to standard chemical staining methods. These techniques, broadly referred to as virtual staining, were extensively explored by multiple research groups and demonstrated to be successful in generating various types of histological stains from label-free microscopic images of unstained samples; similar approaches were also used for transforming images of an already stained tissue sample into another type of stain, performing virtual stain-to-stain transformations. In this Review, we provide a comprehensive overview of the recent research advances in deep learning-enabled virtual histological staining techniques. The basic concepts and the typical workflow of virtual staining are introduced, followed by a discussion of representative works and their technical innovations. We also share our perspectives on the future of this emerging field, aiming to inspire readers from diverse scientific fields to further expand the scope of deep learning-enabled virtual histological staining techniques and their applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.06822v1-abstract-full').style.display = 'none'; document.getElementById('2211.06822v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">35 Pages, 7 Figures, 2 Tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Light: Science & Applications (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.14735">arXiv:2209.14735</a> <span> [<a href="https://arxiv.org/pdf/2209.14735">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Mesoscale and Nanoscale Physics">cond-mat.mes-hall</span> </div> </div> <p class="title is-5 mathjax"> Symmetry-compatible angular momentum conservation relation in plasmonic vortex lenses with rotational symmetries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Yang%2C+J">Jie Yang</a>, <a href="/search/physics?searchtype=author&query=Feng%2C+P">Pengyi Feng</a>, <a href="/search/physics?searchtype=author&query=Han%2C+F">Fei Han</a>, <a href="/search/physics?searchtype=author&query=Zheng%2C+X">Xuezhi Zheng</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+J">Jiafu Wang</a>, <a href="/search/physics?searchtype=author&query=Jin%2C+Z">Zhongwei Jin</a>, <a href="/search/physics?searchtype=author&query=Verellen%2C+N">Niels Verellen</a>, <a href="/search/physics?searchtype=author&query=Janssens%2C+E">Ewald Janssens</a>, <a href="/search/physics?searchtype=author&query=Ni%2C+J">Jincheng Ni</a>, <a href="/search/physics?searchtype=author&query=Chen%2C+W">Weijin Chen</a>, <a 
href="/search/physics?searchtype=author&query=Yang%2C+Y">Yuanjie Yang</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+A">Anxue Zhang</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Benfeng Bai</a>, <a href="/search/physics?searchtype=author&query=Qiu%2C+C">Chengwei Qiu</a>, <a href="/search/physics?searchtype=author&query=Vandenbosch%2C+G+A+E">Guy A E Vandenbosch</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.14735v3-abstract-short" style="display: inline;"> Plasmonic vortex lenses (PVLs), producing vortex modes, known as plasmonic vortices (PVs), in the process of plasmonic spin-orbit coupling, provide a promising platform for the realization of many optical vortex-based applications. Very recently, it has been reported that a single PVL can generate multiple PVs. This work exploits the representation theory of finite groups, reveals the symmetry ori… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.14735v3-abstract-full').style.display = 'inline'; document.getElementById('2209.14735v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.14735v3-abstract-full" style="display: none;"> Plasmonic vortex lenses (PVLs), producing vortex modes, known as plasmonic vortices (PVs), in the process of plasmonic spin-orbit coupling, provide a promising platform for the realization of many optical vortex-based applications. Very recently, it has been reported that a single PVL can generate multiple PVs. This work exploits the representation theory of finite groups, reveals the symmetry origin of the generated PVs, and derives a new conservation relation based on symmetry principles. Specifically, the symmetry principles divide the near field of the PVL into regions, designate integers, which are the topological charges, to the regions, and, particularly, give an upper bound to the topological charge of the PV at the center of the PVL. Further application of the symmetry principles to the spin-orbit coupling process leads to a new conservation relation. Based on this relation, a two-step procedure is suggested to link the angular momentum of the incident field with the one of the generated PVs through the symmetries of the PVL. This theory is well demonstrated by numerical calculations. This work provides an alternative but essential symmetry perspective on the dynamics of spin-orbit coupling in PVLs, forms a strong complement for the physical investigations performed before, and therefore lays down a solid foundation for flexibly manipulating the PVs for emerging vortex-based nanophotonic applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.14735v3-abstract-full').style.display = 'none'; document.getElementById('2209.14735v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages and 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.10362">arXiv:2208.10362</a> <span> [<a href="https://arxiv.org/pdf/2208.10362">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1117/1.AP.5.1.016003">10.1117/1.AP.5.1.016003 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Massively Parallel Universal Linear Transformations using a Wavelength-Multiplexed Diffractive Optical Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Li%2C+J">Jingxi Li</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Luo%2C+Y">Yi Luo</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.10362v1-abstract-short" style="display: inline;"> We report deep learning-based design of a massively parallel broadband diffractive neural network for all-optically performing a large group of arbitrarily-selected, complex-valued linear transformations between an input and output field-of-view, each with N_i and N_o pixels, respectively. This broadband diffractive processor is composed of N_w wavelength channels, each of which is uniquely assign… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.10362v1-abstract-full').style.display = 'inline'; document.getElementById('2208.10362v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.10362v1-abstract-full" style="display: none;"> We report deep learning-based design of a massively parallel broadband diffractive neural network for all-optically performing a large group of arbitrarily-selected, complex-valued linear transformations between an input and output field-of-view, each with N_i and N_o pixels, respectively. This broadband diffractive processor is composed of N_w wavelength channels, each of which is uniquely assigned to a distinct target transformation. A large set of arbitrarily-selected linear transformations can be individually performed through the same diffractive network at different illumination wavelengths, either simultaneously or sequentially (wavelength scanning). We demonstrate that such a broadband diffractive network, regardless of its material dispersion, can successfully approximate N_w unique complex-valued linear transforms with a negligible error when the number of diffractive neurons (N) in its design matches or exceeds 2 x N_w x N_i x N_o. 
We further report that the spectral multiplexing capability (N_w) can be increased by increasing N; our numerical analyses confirm these conclusions for N_w > 180, which can be further increased to e.g., ~2000 depending on the upper bound of the approximation error. Massively parallel, wavelength-multiplexed diffractive networks will be useful for designing high-throughput intelligent machine vision systems and hyperspectral processors that can perform statistical inference and analyze objects/scenes with unique spectral properties. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.10362v1-abstract-full').style.display = 'none'; document.getElementById('2208.10362v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 Pages, 9 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Advanced Photonics (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.03968">arXiv:2208.03968</a> <span> [<a href="https://arxiv.org/pdf/2208.03968">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41377-023-01116-3">10.1038/s41377-023-01116-3 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> All-optical image classification through unknown random diffusers using a single-pixel diffractive network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Luo%2C+Y">Yi Luo</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuhang Li</a>, <a href="/search/physics?searchtype=author&query=Cetintas%2C+E">Ege Cetintas</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.03968v1-abstract-short" style="display: inline;"> Classification of an object behind a random and unknown scattering medium sets a challenging task for computational imaging and machine vision fields. Recent deep learning-based approaches demonstrated the classification of objects using diffuser-distorted patterns collected by an image sensor. 
These methods demand relatively large-scale computing using deep neural networks running on digital comp… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.03968v1-abstract-full').style.display = 'inline'; document.getElementById('2208.03968v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.03968v1-abstract-full" style="display: none;"> Classification of an object behind a random and unknown scattering medium sets a challenging task for computational imaging and machine vision fields. Recent deep learning-based approaches demonstrated the classification of objects using diffuser-distorted patterns collected by an image sensor. These methods demand relatively large-scale computing using deep neural networks running on digital computers. Here, we present an all-optical processor to directly classify unknown objects through unknown, random phase diffusers using broadband illumination detected with a single pixel. A set of transmissive diffractive layers, optimized using deep learning, forms a physical network that all-optically maps the spatial information of an input object behind a random diffuser into the power spectrum of the output light detected through a single pixel at the output plane of the diffractive network. We numerically demonstrated the accuracy of this framework using broadband radiation to classify unknown handwritten digits through random new diffusers, never used during the training phase, and achieved a blind testing accuracy of 88.53%. This single-pixel all-optical object classification system through random diffusers is based on passive diffractive layers that process broadband input light and can operate at any part of the electromagnetic spectrum by simply scaling the diffractive features proportional to the wavelength range of interest. These results have various potential applications in, e.g., biomedical imaging, security, robotics, and autonomous driving. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.03968v1-abstract-full').style.display = 'none'; document.getElementById('2208.03968v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. 
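<p class="is-size-7">One plausible readout consistent with the single-pixel scheme described above is to assign each class to a wavelength channel and pick the channel with the highest detected power; a toy version of that decision rule, with made-up power values, is sketched below.</p> <pre>
import numpy as np

# Ten wavelength channels, one per digit class; the values are invented for illustration.
detected_power = np.array([0.11, 0.08, 0.31, 0.05, 0.07, 0.06, 0.09, 0.04, 0.10, 0.09])
predicted_digit = int(np.argmax(detected_power))
print(predicted_digit)   # 2: the class whose assigned wavelength carries the most power
</pre>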
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">28 Pages, 6 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Light: Science & Applications (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.06578">arXiv:2207.06578</a> <span> [<a href="https://arxiv.org/pdf/2207.06578">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1021/acsphotonics.2c00932">10.1021/acsphotonics.2c00932 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Virtual stain transfer in histology via cascaded deep neural networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Yang%2C+X">Xilin Yang</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yijie Zhang</a>, <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuzhu Li</a>, <a href="/search/physics?searchtype=author&query=de+Haan%2C+K">Kevin de Haan</a>, <a href="/search/physics?searchtype=author&query=Liu%2C+T">Tairan Liu</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.06578v1-abstract-short" style="display: inline;"> Pathological diagnosis relies on the visual inspection of histologically stained thin tissue specimens, where different types of stains are applied to bring contrast to and highlight various desired histological features. However, the destructive histochemical staining procedures are usually irreversible, making it very difficult to obtain multiple stains on the same tissue section. Here, we demon… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.06578v1-abstract-full').style.display = 'inline'; document.getElementById('2207.06578v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.06578v1-abstract-full" style="display: none;"> Pathological diagnosis relies on the visual inspection of histologically stained thin tissue specimens, where different types of stains are applied to bring contrast to and highlight various desired histological features. 
However, the destructive histochemical staining procedures are usually irreversible, making it very difficult to obtain multiple stains on the same tissue section. Here, we demonstrate a virtual stain transfer framework via a cascaded deep neural network (C-DNN) to digitally transform hematoxylin and eosin (H&E) stained tissue images into other types of histological stains. Unlike a single neural network structure which only takes one stain type as input to digitally output images of another stain type, C-DNN first uses virtual staining to transform autofluorescence microscopy images into H&E and then performs stain transfer from H&E to the domain of the other stain in a cascaded manner. This cascaded structure in the training phase allows the model to directly exploit histochemically stained image data on both H&E and the target special stain of interest. This advantage alleviates the challenge of paired data acquisition and improves the image quality and color accuracy of the virtual stain transfer from H&E to another stain. We validated the superior performance of this C-DNN approach using kidney needle core biopsy tissue sections and successfully transferred the H&E-stained tissue images into virtual PAS (periodic acid-Schiff) stain. This method provides high-quality virtual images of special stains using existing, histochemically stained slides and creates new opportunities in digital pathology by performing highly accurate stain-to-stain transformations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.06578v1-abstract-full').style.display = 'none'; document.getElementById('2207.06578v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. 
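<p class="is-size-7">A minimal sketch of the cascaded inference flow described above is given below; the two stages are placeholder callables standing in for the trained image-translation networks and do not reproduce the actual architecture or weights.</p> <pre>
import numpy as np

def stage1_autofluorescence_to_he(af_img: np.ndarray) -> np.ndarray:
    # placeholder for the trained label-free-to-virtual-H&E generator
    return af_img

def stage2_he_to_pas(he_img: np.ndarray) -> np.ndarray:
    # placeholder for the trained H&E-to-virtual-PAS generator
    return he_img

def cascaded_stain_transfer(af_img: np.ndarray) -> np.ndarray:
    """Cascade: autofluorescence image -> virtual H&E -> virtual special stain (PAS)."""
    return stage2_he_to_pas(stage1_autofluorescence_to_he(af_img))

print(cascaded_stain_transfer(np.zeros((256, 256))).shape)   # (256, 256)
</pre>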
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 Pages, 4 Figures, 1 Table</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ACS Photonics (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.00089">arXiv:2207.00089</a> <span> [<a href="https://arxiv.org/pdf/2207.00089">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41551-023-01057-7">10.1038/s41551-023-01057-7 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Rapid and stain-free quantification of viral plaque via lens-free holography and deep learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Liu%2C+T">Tairan Liu</a>, <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuzhu Li</a>, <a href="/search/physics?searchtype=author&query=Koydemir%2C+H+C">Hatice Ceylan Koydemir</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yijie Zhang</a>, <a href="/search/physics?searchtype=author&query=Yang%2C+E">Ethan Yang</a>, <a href="/search/physics?searchtype=author&query=Eryilmaz%2C+M">Merve Eryilmaz</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+H">Hongda Wang</a>, <a href="/search/physics?searchtype=author&query=Li%2C+J">Jingxi Li</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Ma%2C+G">Guangdong Ma</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.00089v2-abstract-short" style="display: inline;"> We present a rapid and stain-free quantitative viral plaque assay using lensfree holographic imaging and deep learning. This cost-effective, compact, and automated device significantly reduces the incubation time needed for traditional plaque assays while preserving their advantages over other virus quantification methods. 
This device captures ~0.32 Giga-pixel/hour phase information of the objects… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.00089v2-abstract-full').style.display = 'inline'; document.getElementById('2207.00089v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.00089v2-abstract-full" style="display: none;"> We present a rapid and stain-free quantitative viral plaque assay using lensfree holographic imaging and deep learning. This cost-effective, compact, and automated device significantly reduces the incubation time needed for traditional plaque assays while preserving their advantages over other virus quantification methods. This device captures ~0.32 Giga-pixel/hour phase information of the objects per test well, covering an area of ~30x30 mm^2, in a label-free manner, eliminating staining entirely. We demonstrated the success of this computational method using vesicular stomatitis virus (VSV), herpes simplex virus (HSV-1) and encephalomyocarditis virus (EMCV). Using a neural network, this stain-free device automatically detected the first cell lysing events due to the VSV viral replication as early as 5 hours after the incubation, and achieved >90% detection rate for the VSV plaque-forming units (PFUs) with 100% specificity in <20 hours, providing major time savings compared to the traditional plaque assays that take at least 48 hours. Similarly, this stain-free device reduced the needed incubation time by ~48 hours for HSV-1 and ~20 hours for EMCV, achieving >90% detection rate with 100% specificity. We also demonstrated that this data-driven plaque assay offers the capability of quantifying the infected area of the cell monolayer, performing automated counting and quantification of PFUs and virus-infected areas over a 10-fold larger dynamic range of virus concentration than standard viral plaque assays. This compact, low-cost, automated PFU quantification device can be broadly used in virology research, vaccine development, and clinical applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.00089v2-abstract-full').style.display = 'none'; document.getElementById('2207.00089v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 Pages, 6 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nature Biomedical Engineering (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.13122">arXiv:2205.13122</a> <span> [<a href="https://arxiv.org/pdf/2205.13122">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1186/s43593-022-00021-3">10.1186/s43593-022-00021-3 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> To image, or not to image: Class-specific diffractive cameras with all-optical erasure of undesired objects </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Luo%2C+Y">Yi Luo</a>, <a href="/search/physics?searchtype=author&query=Gan%2C+T">Tianyi Gan</a>, <a href="/search/physics?searchtype=author&query=Hu%2C+J">Jingtian Hu</a>, <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuhang Li</a>, <a href="/search/physics?searchtype=author&query=Zhao%2C+Y">Yifan Zhao</a>, <a href="/search/physics?searchtype=author&query=Mengu%2C+D">Deniz Mengu</a>, <a href="/search/physics?searchtype=author&query=Jarrahi%2C+M">Mona Jarrahi</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.13122v1-abstract-short" style="display: inline;"> Privacy protection is a growing concern in the digital era, with machine vision techniques widely used throughout public and private settings. Existing methods address this growing problem by, e.g., encrypting camera images or obscuring/blurring the imaged information through digital algorithms. Here, we demonstrate a camera design that performs class-specific imaging of target objects with instan… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.13122v1-abstract-full').style.display = 'inline'; document.getElementById('2205.13122v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.13122v1-abstract-full" style="display: none;"> Privacy protection is a growing concern in the digital era, with machine vision techniques widely used throughout public and private settings. Existing methods address this growing problem by, e.g., encrypting camera images or obscuring/blurring the imaged information through digital algorithms. 
Here, we demonstrate a camera design that performs class-specific imaging of target objects with instantaneous all-optical erasure of other classes of objects. This diffractive camera consists of transmissive surfaces structured using deep learning to perform selective imaging of target classes of objects positioned at its input field-of-view. After their fabrication, the thin diffractive layers collectively perform optical mode filtering to accurately form images of the objects that belong to a target data class or group of classes, while instantaneously erasing objects of the other data classes at the output field-of-view. Using the same framework, we also demonstrate the design of class-specific permutation cameras, where the objects of a target data class are pixel-wise permuted for all-optical class-specific encryption, while the other objects are irreversibly erased from the output image. The success of class-specific diffractive cameras was experimentally demonstrated using terahertz (THz) waves and 3D-printed diffractive layers that selectively imaged only one class of the MNIST handwritten digit dataset, all-optically erasing the other handwritten digits. This diffractive camera design can be scaled to different parts of the electromagnetic spectrum, including, e.g., the visible and infrared wavelengths, to provide transformative opportunities for privacy-preserving digital cameras and task-specific data-efficient imaging. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.13122v1-abstract-full').style.display = 'none'; document.getElementById('2205.13122v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. 
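<p class="is-size-7">For the class-specific permutation camera mentioned above, the encryption amounts to a fixed, invertible pixel-wise permutation; a short numerical analogue is sketched below, with an arbitrary key and image standing in for the optical implementation.</p> <pre>
import numpy as np

rng = np.random.default_rng(42)
h, w = 28, 28
key = rng.permutation(h * w)            # fixed permutation acting as the "key"
inverse_key = np.argsort(key)

def permute(img):
    return img.reshape(-1)[key].reshape(h, w)

def unpermute(scrambled):
    return scrambled.reshape(-1)[inverse_key].reshape(h, w)

img = rng.random((h, w))                # stand-in for a target-class object image
assert np.allclose(unpermute(permute(img)), img)   # the scrambling is exactly invertible
</pre>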
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">31 Pages, 7 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> eLight (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.03549">arXiv:2205.03549</a> <span> [<a href="https://arxiv.org/pdf/2205.03549">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1021/acsphotonics.2c00572">10.1021/acsphotonics.2c00572 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Deep Learning-enabled Detection and Classification of Bacterial Colonies using a Thin Film Transistor (TFT) Image Sensor </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuzhu Li</a>, <a href="/search/physics?searchtype=author&query=Liu%2C+T">Tairan Liu</a>, <a href="/search/physics?searchtype=author&query=Koydemir%2C+H+C">Hatice Ceylan Koydemir</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+H">Hongda Wang</a>, <a href="/search/physics?searchtype=author&query=O%27Riordan%2C+K">Keelan O'Riordan</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Haga%2C+Y">Yuta Haga</a>, <a href="/search/physics?searchtype=author&query=Kobashi%2C+J">Junji Kobashi</a>, <a href="/search/physics?searchtype=author&query=Tanaka%2C+H">Hitoshi Tanaka</a>, <a href="/search/physics?searchtype=author&query=Tamaru%2C+T">Takaya Tamaru</a>, <a href="/search/physics?searchtype=author&query=Yamaguchi%2C+K">Kazunori Yamaguchi</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.03549v1-abstract-short" style="display: inline;"> Early detection and identification of pathogenic bacteria such as Escherichia coli (E. coli) is an essential task for public health. The conventional culture-based methods for bacterial colony detection usually take >24 hours to get the final read-out. 
Here, we demonstrate a bacterial colony-forming-unit (CFU) detection system exploiting a thin-film-transistor (TFT)-based image sensor array that s… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.03549v1-abstract-full').style.display = 'inline'; document.getElementById('2205.03549v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.03549v1-abstract-full" style="display: none;"> Early detection and identification of pathogenic bacteria such as Escherichia coli (E. coli) is an essential task for public health. The conventional culture-based methods for bacterial colony detection usually take >24 hours to get the final read-out. Here, we demonstrate a bacterial colony-forming-unit (CFU) detection system exploiting a thin-film-transistor (TFT)-based image sensor array that saves ~12 hours compared to the Environmental Protection Agency (EPA)-approved methods. To demonstrate the efficacy of this CFU detection system, a lensfree imaging modality was built using the TFT image sensor with a sample field-of-view of ~10 cm^2. Time-lapse images of bacterial colonies cultured on chromogenic agar plates were automatically collected at 5-minute intervals. Two deep neural networks were used to detect and count the growing colonies and identify their species. When blindly tested with 265 colonies of E. coli and other coliform bacteria (i.e., Citrobacter and Klebsiella pneumoniae), our system reached an average CFU detection rate of 97.3% at 9 hours of incubation and an average recovery rate of 91.6% at ~12 hours. This TFT-based sensor can be applied to various microbiological detection methods. Due to the large scalability, ultra-large field-of-view, and low cost of the TFT-based image sensors, this platform can be integrated with each agar plate to be tested and disposed of after the automated CFU count. The imaging field-of-view of this platform can be cost-effectively increased to >100 cm^2 to provide a massive throughput for CFU detection using, e.g., roll-to-roll manufacturing of TFTs as used in the flexible display industry. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.03549v1-abstract-full').style.display = 'none'; document.getElementById('2205.03549v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 Pages, 6 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ACS Photonics (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.00428">arXiv:2205.00428</a> <span> [<a href="https://arxiv.org/pdf/2205.00428">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/JSTQE.2022.3194574">10.1109/JSTQE.2022.3194574 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Analysis of Diffractive Neural Networks for Seeing Through Random Diffusers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuhang Li</a>, <a href="/search/physics?searchtype=author&query=Luo%2C+Y">Yi Luo</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.00428v1-abstract-short" style="display: inline;"> Imaging through diffusive media is a challenging problem, where the existing solutions heavily rely on digital computers to reconstruct distorted images. We provide a detailed analysis of a computer-free, all-optical imaging method for seeing through random, unknown phase diffusers using diffractive neural networks, covering different deep learning-based training strategies. By analyzing various d… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.00428v1-abstract-full').style.display = 'inline'; document.getElementById('2205.00428v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.00428v1-abstract-full" style="display: none;"> Imaging through diffusive media is a challenging problem, where the existing solutions heavily rely on digital computers to reconstruct distorted images. We provide a detailed analysis of a computer-free, all-optical imaging method for seeing through random, unknown phase diffusers using diffractive neural networks, covering different deep learning-based training strategies. By analyzing various diffractive networks designed to image through random diffusers with different correlation lengths, a trade-off between the image reconstruction fidelity and distortion reduction capability of the diffractive network was observed. During its training, random diffusers with a range of correlation lengths were used to improve the diffractive network's generalization performance. 
Increasing the number of random diffusers used in each epoch reduced the overfitting of the diffractive network's imaging performance to known diffusers. We also demonstrated that the use of additional diffractive layers improved the generalization capability to see through new, random diffusers. Finally, we introduced deliberate misalignments in training to 'vaccinate' the network against random layer-to-layer shifts that might arise due to the imperfect assembly of the diffractive networks. These analyses provide a comprehensive guide in designing diffractive networks to see through random diffusers, which might profoundly impact many fields, such as biomedical imaging, atmospheric physics, and autonomous driving. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.00428v1-abstract-full').style.display = 'none'; document.getElementById('2205.00428v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">42 Pages, 9 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Journal of Selected Topics in Quantum Electronics (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.05240">arXiv:2112.05240</a> <span> [<a href="https://arxiv.org/pdf/2112.05240">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.34133/2022/9786242">10.34133/2022/9786242 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Label-free virtual HER2 immunohistochemical staining of breast tissue using deep learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+H">Hongda Wang</a>, <a href="/search/physics?searchtype=author&query=Li%2C+Y">Yuzhu Li</a>, <a href="/search/physics?searchtype=author&query=de+Haan%2C+K">Kevin de Haan</a>, <a href="/search/physics?searchtype=author&query=Colonnese%2C+F">Francesco Colonnese</a>, <a href="/search/physics?searchtype=author&query=Wan%2C+Y">Yujie Wan</a>, <a href="/search/physics?searchtype=author&query=Zuo%2C+J">Jingyi Zuo</a>, <a href="/search/physics?searchtype=author&query=Doan%2C+N+B">Ngan B. 
Doan</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+X">Xiaoran Zhang</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yijie Zhang</a>, <a href="/search/physics?searchtype=author&query=Li%2C+J">Jingxi Li</a>, <a href="/search/physics?searchtype=author&query=Dong%2C+W">Wenjie Dong</a>, <a href="/search/physics?searchtype=author&query=Darrow%2C+M+A">Morgan Angus Darrow</a>, <a href="/search/physics?searchtype=author&query=Kamangar%2C+E">Elham Kamangar</a>, <a href="/search/physics?searchtype=author&query=Lee%2C+H+S">Han Sung Lee</a>, <a href="/search/physics?searchtype=author&query=Rivenson%2C+Y">Yair Rivenson</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.05240v1-abstract-short" style="display: inline;"> The immunohistochemical (IHC) staining of the human epidermal growth factor receptor 2 (HER2) biomarker is widely practiced in breast tissue analysis, preclinical studies and diagnostic decisions, guiding cancer treatment and investigation of pathogenesis. HER2 staining demands laborious tissue treatment and chemical processing performed by a histotechnologist, which typically takes one day to pre… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05240v1-abstract-full').style.display = 'inline'; document.getElementById('2112.05240v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.05240v1-abstract-full" style="display: none;"> The immunohistochemical (IHC) staining of the human epidermal growth factor receptor 2 (HER2) biomarker is widely practiced in breast tissue analysis, preclinical studies and diagnostic decisions, guiding cancer treatment and investigation of pathogenesis. HER2 staining demands laborious tissue treatment and chemical processing performed by a histotechnologist, which typically takes one day to prepare in a laboratory, increasing analysis time and associated costs. Here, we describe a deep learning-based virtual HER2 IHC staining method using a conditional generative adversarial network that is trained to rapidly transform autofluorescence microscopic images of unlabeled/label-free breast tissue sections into bright-field equivalent microscopic images, matching the standard HER2 IHC staining that is chemically performed on the same tissue sections. The efficacy of this virtual HER2 staining framework was demonstrated by quantitative analysis, in which three board-certified breast pathologists blindly graded the HER2 scores of virtually stained and immunohistochemically stained HER2 whole slide images (WSIs) to reveal that the HER2 scores determined by inspecting virtual IHC images are as accurate as their immunohistochemically stained counterparts. A second quantitative blinded study performed by the same diagnosticians further revealed that the virtually stained HER2 images exhibit a comparable staining quality in the level of nuclear detail, membrane clearness, and absence of staining artifacts with respect to their immunohistochemically stained counterparts. This virtual HER2 staining framework bypasses the costly, laborious, and time-consuming IHC staining procedures in laboratory, and can be extended to other types of biomarkers to accelerate the IHC tissue staining used in life sciences and biomedical workflow. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05240v1-abstract-full').style.display = 'none'; document.getElementById('2112.05240v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">26 Pages, 5 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> BME Frontiers (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.02501">arXiv:2112.02501</a> <span> [<a href="https://arxiv.org/pdf/2112.02501">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> </div> <p class="title is-5 mathjax"> Incoherent Optoelectronic Differentiation with Optimized Multilayer Films </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Zhang%2C+X">Xiaomeng Zhang</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Benfeng Bai</a>, <a href="/search/physics?searchtype=author&query=Sun%2C+H">Hong-Bo Sun</a>, <a href="/search/physics?searchtype=author&query=Jin%2C+G">Guofan Jin</a>, <a href="/search/physics?searchtype=author&query=Valentine%2C+J">Jason Valentine</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.02501v1-abstract-short" style="display: inline;"> Fourier-based optical computing operations, such as spatial differentiation, have recently been realized in compact form factors using flat optics. Experimental demonstrations, however, have been limited to coherent light requiring laser illumination and leading to speckle noise and unwanted interference fringes. Here, we demonstrate the use of optimized multilayer films, combined with dual color… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.02501v1-abstract-full').style.display = 'inline'; document.getElementById('2112.02501v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.02501v1-abstract-full" style="display: none;"> Fourier-based optical computing operations, such as spatial differentiation, have recently been realized in compact form factors using flat optics. Experimental demonstrations, however, have been limited to coherent light requiring laser illumination and leading to speckle noise and unwanted interference fringes. Here, we demonstrate the use of optimized multilayer films, combined with dual color image subtraction, to realize differentiation with unpolarized incoherent light. Global optimization is achieved by employing neural networks combined with the reconciled level set method to optimize the optical transfer functions of multilayer films at wavelengths of 532 nm and 633 nm. 
Spatial differentiation is then achieved by subtracting the normalized incoherent images at these two wavelengths. The optimized multilayer films are experimentally demonstrated to achieve incoherent differentiation with a numerical aperture up to 0.8 and a resolution of 6.2 μm. The use of multilayer films allows for lithography-free fabrication and is easily combined with existing imaging systems, opening the door to applications in microscopy, machine vision, and other image processing applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.02501v1-abstract-full').style.display = 'none'; document.getElementById('2112.02501v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.12856">arXiv:2110.12856</a> <span> [<a href="https://arxiv.org/pdf/2110.12856">pdf</a>, <a href="https://arxiv.org/format/2110.12856">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41586-022-04579-3">10.1038/s41586-022-04579-3 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Bridging microcombs and silicon photonic engines for optoelectronics systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Shu%2C+H">Haowen Shu</a>, <a href="/search/physics?searchtype=author&query=Chang%2C+L">Lin Chang</a>, <a href="/search/physics?searchtype=author&query=Tao%2C+Y">Yuansheng Tao</a>, <a href="/search/physics?searchtype=author&query=Shen%2C+B">Bitao Shen</a>, <a href="/search/physics?searchtype=author&query=Xie%2C+W">Weiqiang Xie</a>, <a href="/search/physics?searchtype=author&query=Jin%2C+M">Ming Jin</a>, <a href="/search/physics?searchtype=author&query=Netherton%2C+A">Andrew Netherton</a>, <a href="/search/physics?searchtype=author&query=Tao%2C+Z">Zihan Tao</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+X">Xuguang Zhang</a>, <a href="/search/physics?searchtype=author&query=Chen%2C+R">Ruixuan Chen</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bowen Bai</a>, <a href="/search/physics?searchtype=author&query=Qin%2C+J">Jun Qin</a>, <a href="/search/physics?searchtype=author&query=Yu%2C+S">Shaohua Yu</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+X">Xingjun Wang</a>, <a href="/search/physics?searchtype=author&query=Bowers%2C+J+E">John E. 
Bowers</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.12856v1-abstract-short" style="display: inline;"> Microcombs have sparked a surge of applications over the last decade, ranging from optical communications to metrology. Despite their diverse deployment, most microcomb-based systems rely on a tremendous amount of bulk equipment to fulfill their desired functions, which is rather complicated, expensive and power-consuming. On the other hand, foundry-based silicon photonics (SiPh) has had remarkabl… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.12856v1-abstract-full').style.display = 'inline'; document.getElementById('2110.12856v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.12856v1-abstract-full" style="display: none;"> Microcombs have sparked a surge of applications over the last decade, ranging from optical communications to metrology. Despite their diverse deployment, most microcomb-based systems rely on a tremendous amount of bulk equipment to fulfill their desired functions, which is rather complicated, expensive and power-consuming. On the other hand, foundry-based silicon photonics (SiPh) has had remarkable success in providing versatile functionality in a scalable and low-cost manner, but its available chip-based light sources lack the capacity for parallelization, which limits the scope of SiPh applications. Here, we bridge these two technologies by using a power-efficient and operationally-simple AlGaAs on insulator microcomb source to drive CMOS SiPh engines. We present two important chip-scale photonic systems for optical data transmissions and microwave photonics respectively: The first microcomb-based integrated photonic data link is demonstrated, based on a pulse-amplitude 4-level modulation scheme with 2 Tbps aggregate rate, and a highly reconfigurable microwave photonic filter with unprecedented integration level is constructed, using a time stretch scheme. Such synergy of microcomb and SiPh integrated components is an essential step towards the next generation of fully integrated photonic systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.12856v1-abstract-full').style.display = 'none'; document.getElementById('2110.12856v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
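<p class="is-size-7">As a sanity check on the quoted aggregate rate, PAM-4 carries two bits per symbol, so 2 Tbps could be reached, for example, with 32 channels at 31.25 GBaud each; the channel count and symbol rate below are hypothetical and are not taken from the paper.</p> <pre>
import math

bits_per_symbol = math.log2(4)        # PAM-4 carries 2 bits per symbol
channels = 32                         # hypothetical number of comb-line channels
symbol_rate = 31.25e9                 # hypothetical per-channel symbol rate (baud)
aggregate_bps = channels * symbol_rate * bits_per_symbol
print(aggregate_bps / 1e12, "Tbps")   # 2.0 Tbps
</pre>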
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 4 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nature 605, 457-463 (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.07933">arXiv:2106.07933</a> <span> [<a href="https://arxiv.org/pdf/2106.07933">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Classical Physics">physics.class-ph</span> </div> </div> <p class="title is-5 mathjax"> Micro/nanoliter droplet extraction by controlling acoustic vortex with miniwatt </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Zhang%2C+H">Han Zhang</a>, <a href="/search/physics?searchtype=author&query=Yang%2C+J">Jun Yang</a>, <a href="/search/physics?searchtype=author&query=Zhou%2C+Y">Yun Zhou</a>, <a href="/search/physics?searchtype=author&query=Zheng%2C+J">Jianfeng Zheng</a>, <a href="/search/physics?searchtype=author&query=Cheng%2C+Y">Yong Cheng</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bichao Bai</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+G">Guoxin Zhang</a>, <a href="/search/physics?searchtype=author&query=Lv%2C+Y">Yisheng Lv</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.07933v2-abstract-short" style="display: inline;"> Micro/nanoliter droplet is capable of achieving versatile applications with tiny volume and substantial surface energy, which is a big plus over bulk liquid. Yet, the contradiction of elaborate manipulation and enough power is still a challenge. Here, we unleash the potential of our miniwatt aspirators pumping up liquid and creating droplets with the help of acoustic vortex beams, inspired by the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.07933v2-abstract-full').style.display = 'inline'; document.getElementById('2106.07933v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.07933v2-abstract-full" style="display: none;"> Micro/nanoliter droplet is capable of achieving versatile applications with tiny volume and substantial surface energy, which is a big plus over bulk liquid. Yet, the contradiction of elaborate manipulation and enough power is still a challenge. Here, we unleash the potential of our miniwatt aspirators pumping up liquid and creating droplets with the help of acoustic vortex beams, inspired by the power mechanism that spirals are significant for most mollusks that live in water. These droplet aspirators produce very large interface deformations by small radiation pressures with orbit angular momentum from spiral-electrode transducers. The precisely contactless manipulation of physical, chemical and biological objects at micrometric down to nanometric scales, promises tremendous development in fields as diverse as microrobotics, nanoreactors, or nanoassemblies. 
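<p class="is-size-7 mathjax"> The orbital angular momentum invoked in the abstract above is carried by a helical phase front of the form $e^{i l \theta}$ around the beam axis; the sketch below builds this textbook phase profile numerically and is only a generic illustration, not a model of the authors' spiral-electrode transducers (the topological charge $l$ and the grid are arbitrary choices). </p>
<pre><code>
# Minimal sketch of the helical phase front exp(i*l*theta) that carries orbital
# angular momentum in an acoustic vortex beam (textbook construction, not the
# authors' spiral-electrode transducer model). Topological charge l is arbitrary.
import numpy as np

l = 1                                   # topological charge (assumption)
x = np.linspace(-1.0, 1.0, 256)
X, Y = np.meshgrid(x, x)
theta = np.arctan2(Y, X)                # azimuthal angle in the source plane
field = np.exp(1j * l * theta)          # unit-amplitude vortex phase profile

# The phase winds by 2*pi*l around the axis; a physical beam also has a vanishing
# on-axis amplitude, which is not modelled here.
print("phase range (rad):", float(np.ptp(np.angle(field))))
</code></pre>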
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.07933v2-abstract-full').style.display = 'none'; document.getElementById('2106.07933v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.13518">arXiv:2105.13518</a> <span> [<a href="https://arxiv.org/pdf/2105.13518">pdf</a>, <a href="https://arxiv.org/format/2105.13518">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1063/5.0056027">10.1063/5.0056027 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> 18.8 Gbps real-time quantum random number generator with a photonic integrated chip </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bing Bai</a>, <a href="/search/physics?searchtype=author&query=Huang%2C+J">Jianyao Huang</a>, <a href="/search/physics?searchtype=author&query=Qiao%2C+G">Guan-Ru Qiao</a>, <a href="/search/physics?searchtype=author&query=Nie%2C+Y">You-Qi Nie</a>, <a href="/search/physics?searchtype=author&query=Tang%2C+W">Weijie Tang</a>, <a href="/search/physics?searchtype=author&query=Chu%2C+T">Tao Chu</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+J">Jun Zhang</a>, <a href="/search/physics?searchtype=author&query=Pan%2C+J">Jian-Wei Pan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.13518v1-abstract-short" style="display: inline;"> Quantum random number generators (QRNGs) can produce true random numbers. Yet, the two most important QRNG parameters highly desired for practical applications, i.e., speed and size, have to be compromised during implementations. Here, we present the fastest and miniaturized QRNG with a record real-time output rate as high as 18.8 Gbps by combining a photonic integrated chip and the technology of… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.13518v1-abstract-full').style.display = 'inline'; document.getElementById('2105.13518v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.13518v1-abstract-full" style="display: none;"> Quantum random number generators (QRNGs) can produce true random numbers. Yet, the two most important QRNG parameters highly desired for practical applications, i.e., speed and size, have to be compromised during implementations. 
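<p class="is-size-7"> The throughput figures in this entry (a 2.5 GSa/s, 10-bit digitizer and the 18.8 Gbps real-time output, detailed later in the abstract) imply a raw digitized rate of 25 Gbps and hence an overall randomness-extraction ratio of roughly 0.75; the quick check below assumes every ADC sample enters the extractor, which the abstract does not state explicitly. </p>
<pre><code>
# Consistency check of the throughput figures quoted in this abstract.
# Assumes every ADC sample is fed to the randomness extractor (not stated explicitly).
sample_rate = 2.5e9        # samples per second
adc_bits = 10              # bits per sample
real_time_output = 18.8e9  # bits per second of extracted randomness

raw_rate = sample_rate * adc_bits               # 25 Gbps of raw samples
extraction_ratio = real_time_output / raw_rate  # roughly 0.75
print(f"raw digitized rate: {raw_rate / 1e9:.1f} Gbps, "
      f"implied extraction ratio: {extraction_ratio:.2f}")
</code></pre>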
Here, we present the fastest and miniaturized QRNG with a record real-time output rate as high as 18.8 Gbps by combining a photonic integrated chip and the technology of optimized randomness extraction. We assemble the photonic integrated circuit designed for vacuum state QRNG implementation, InGaAs homodyne detector and high-bandwidth transimpedance amplifier into a single chip using hybrid packaging, which exhibits the excellent characteristics of integration and high-frequency response. With a sample rate of 2.5 GSa/s in a 10-bit analog-to-digital converter and subsequent paralleled postprocessing in a field programmable gate array, the QRNG outputs ultrafast random bitstreams via a fiber optic transceiver, whose real-time speed is validated in a personal computer. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.13518v1-abstract-full').style.display = 'none'; document.getElementById('2105.13518v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 4 figures. Accepted for publication in Applied Physics Letters</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Appl. Phys. Lett. 118, 264001 (2021) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.12122">arXiv:2105.12122</a> <span> [<a href="https://arxiv.org/pdf/2105.12122">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41377-021-00666-8">10.1038/s41377-021-00666-8 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Optical coherent dot-product chip for sophisticated deep learning regression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Xu%2C+S">Shaofu Xu</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+J">Jing Wang</a>, <a href="/search/physics?searchtype=author&query=Shu%2C+H">Haowen Shu</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Z">Zhike Zhang</a>, <a href="/search/physics?searchtype=author&query=Yi%2C+S">Sicheng Yi</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bowen Bai</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+X">Xingjun Wang</a>, <a href="/search/physics?searchtype=author&query=Liu%2C+J">Jianguo Liu</a>, <a href="/search/physics?searchtype=author&query=Zou%2C+W">Weiwen Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.12122v3-abstract-short" style="display: inline;"> Optical implementations of 
neural networks (ONNs) herald the next-generation high-speed and energy-efficient deep learning computing by harnessing the technical advantages of large bandwidth and high parallelism of optics. However, due to the problems of incomplete numerical domain, limited hardware scale, or inadequate numerical accuracy, the majority of existing ONNs were studied for basic class… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.12122v3-abstract-full').style.display = 'inline'; document.getElementById('2105.12122v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.12122v3-abstract-full" style="display: none;"> Optical implementations of neural networks (ONNs) herald the next-generation high-speed and energy-efficient deep learning computing by harnessing the technical advantages of large bandwidth and high parallelism of optics. However, due to the problems of incomplete numerical domain, limited hardware scale, or inadequate numerical accuracy, the majority of existing ONNs were studied for basic classification tasks. Given that regression is a fundamental form of deep learning and accounts for a large part of current artificial intelligence applications, it is necessary to master deep learning regression for further development and deployment of ONNs. Here, we demonstrate a silicon-based optical coherent dot-product chip (OCDC) capable of completing deep learning regression tasks. The OCDC adopts optical fields to carry out operations in complete real-value domain instead of in only positive domain. Via reusing, a single chip conducts matrix multiplications and convolutions in neural networks of any complexity. Also, hardware deviations are compensated via in-situ backpropagation control provided the simplicity of chip architecture. Therefore, the OCDC meets the requirements for sophisticated regression tasks and we successfully demonstrate a representative neural network, the AUTOMAP (a cutting-edge neural network model for image reconstruction). The quality of reconstructed images by the OCDC and a 32-bit digital computer is comparable. To the best of our knowledge, there is no precedent of performing such state-of-the-art regression tasks on ONN chip. It is anticipated that the OCDC can promote novel accomplishment of ONNs in modern AI applications including autonomous driving, natural language processing, and scientific study. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.12122v3-abstract-full').style.display = 'none'; document.getElementById('2105.12122v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. 
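<p class="is-size-7"> The "complete real-value domain" operation claimed for the OCDC above is commonly realized in coherent photonics by encoding a value's sign in a 0/π optical phase and its magnitude in the field amplitude; the sketch below checks that convention numerically and illustrates the principle only, not the chip's actual hardware mapping. </p>
<pre><code>
# Sketch of signed (real-valued) arithmetic with coherent fields: sign is carried
# by a 0/pi phase and magnitude by the field amplitude. This illustrates the
# "complete real-value domain" claim; it is not the OCDC's actual hardware mapping.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=8)          # signed input vector
w = rng.normal(size=8)          # signed weight vector

def to_field(v):
    # magnitude -> amplitude, sign -> phase of 0 or pi
    return np.abs(v) * np.exp(1j * np.pi * (v < 0))

coherent_sum = np.sum(to_field(x) * to_field(w))      # interferes all products
print(np.allclose(coherent_sum.real, np.dot(x, w)))   # True: signed dot product recovered
</code></pre>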
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Light: Science & Applications 10, 221 (2021) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.11735">arXiv:2105.11735</a> <span> [<a href="https://arxiv.org/pdf/2105.11735">pdf</a>, <a href="https://arxiv.org/format/2105.11735">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Fluid Dynamics">physics.flu-dyn</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Geophysics">physics.geo-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1017/jfm.2021.487">10.1017/jfm.2021.487 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Flocculation of suspended cohesive particles in homogeneous isotropic turbulence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Zhao%2C+K">K. Zhao</a>, <a href="/search/physics?searchtype=author&query=Pomes%2C+F">F. Pomes</a>, <a href="/search/physics?searchtype=author&query=Vowinckel%2C+B">B. Vowinckel</a>, <a href="/search/physics?searchtype=author&query=Hsu%2C+T+-">T. -J. Hsu</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">B. Bai</a>, <a href="/search/physics?searchtype=author&query=Meiburg%2C+E">E. Meiburg</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.11735v1-abstract-short" style="display: inline;"> We investigate the dynamics of cohesive particles in homogeneous isotropic turbulence, based on one-way coupled simulations that include Stokes drag, lubrication, cohesive and direct contact forces. We observe a transient flocculation phase characterized by a growing average floc size, followed by a statistically steady equilibrium phase. We analyze the temporal evolution of floc size and shape du… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.11735v1-abstract-full').style.display = 'inline'; document.getElementById('2105.11735v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.11735v1-abstract-full" style="display: none;"> We investigate the dynamics of cohesive particles in homogeneous isotropic turbulence, based on one-way coupled simulations that include Stokes drag, lubrication, cohesive and direct contact forces. We observe a transient flocculation phase characterized by a growing average floc size, followed by a statistically steady equilibrium phase. We analyze the temporal evolution of floc size and shape due to aggregation, breakage, and deformation. Larger turbulent shear and weaker cohesive forces yield elongated flocs that are smaller in size. Flocculation proceeds most rapidly when the fluid and particle time scales are balanced and a suitably defined Stokes number is \textit{O}(1). 
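<p class="is-size-7 mathjax"> For reference, a commonly used Stokes number in this setting (the exact normalization may differ from the paper's) compares the primary-particle response time with the Kolmogorov time scale, $$\mathrm{St} = \frac{\tau_p}{\tau_\eta}, \qquad \tau_p = \frac{\rho_p d_p^2}{18\,\mu}, \qquad \tau_\eta = \sqrt{\frac{\nu}{\varepsilon}},$$ so that $\mathrm{St} = O(1)$ corresponds to primary particles that respond on the same time scale as the smallest turbulent eddies, which is the balance referred to above. </p>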
During the transient stage, cohesive forces of intermediate strength produce flocs of the largest size, as they are strong enough to cause aggregation, but not so strong as to pull the floc into a compact shape. Small Stokes numbers and weak turbulence delay the onset of the equilibrium stage. During equilibrium, stronger cohesive forces yield flocs of larger size. The equilibrium floc size distribution exhibits a preferred size that depends on the cohesive number. We observe that flocs are generally elongated by turbulent stresses before breakage. Flocs of size close to the Kolmogorov length scale preferentially align themselves with the intermediate strain direction and the vorticity vector. Flocs of smaller size tend to align themselves with the extensional strain direction. More generally, flocs are aligned with the strongest Lagrangian stretching direction. The Kolmogorov scale is seen to limit floc growth. We propose a new flocculation model with a variable fractal dimension that predicts the temporal evolution of the floc size and shape. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.11735v1-abstract-full').style.display = 'none'; document.getElementById('2105.11735v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted for publication in the Journal of Fluid Mechanics</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.14723">arXiv:2104.14723</a> <span> [<a href="https://arxiv.org/pdf/2104.14723">pdf</a>, <a href="https://arxiv.org/format/2104.14723">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1103/PhysRevLett.127.160502">10.1103/PhysRevLett.127.160502 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Measurement-Device-Independent Verification of a Quantum Memory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Yu%2C+Y">Yong Yu</a>, <a href="/search/physics?searchtype=author&query=Sun%2C+P">Peng-Fei Sun</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yu-Zhe Zhang</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bing Bai</a>, <a href="/search/physics?searchtype=author&query=Fang%2C+Y">Yu-Qiang Fang</a>, <a href="/search/physics?searchtype=author&query=Luo%2C+X">Xi-Yu Luo</a>, <a href="/search/physics?searchtype=author&query=An%2C+Z">Zi-Ye An</a>, <a href="/search/physics?searchtype=author&query=Li%2C+J">Jun Li</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+J">Jun Zhang</a>, <a href="/search/physics?searchtype=author&query=Xu%2C+F">Feihu Xu</a>, <a 
href="/search/physics?searchtype=author&query=Bao%2C+X">Xiao-Hui Bao</a>, <a href="/search/physics?searchtype=author&query=Pan%2C+J">Jian-Wei Pan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2104.14723v1-abstract-short" style="display: inline;"> In this paper we report an experiment that verifies an atomic-ensemble quantum memory via a measurement-device-independent scheme. A single photon generated via Rydberg blockade in one atomic ensemble is stored in another atomic ensemble via electromagnetically induced transparency. After storage for a long duration, this photon is retrieved and interfered with a second photon to perform joint Bel… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.14723v1-abstract-full').style.display = 'inline'; document.getElementById('2104.14723v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2104.14723v1-abstract-full" style="display: none;"> In this paper we report an experiment that verifies an atomic-ensemble quantum memory via a measurement-device-independent scheme. A single photon generated via Rydberg blockade in one atomic ensemble is stored in another atomic ensemble via electromagnetically induced transparency. After storage for a long duration, this photon is retrieved and interfered with a second photon to perform joint Bell-state measurement (BSM). Quantum state for each photon is chosen based on a quantum random number generator respectively in each run. By evaluating correlations between the random states and BSM results, we certify that our memory is genuinely entanglement-preserving. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.14723v1-abstract-full').style.display = 'none'; document.getElementById('2104.14723v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2007.00741">arXiv:2007.00741</a> <span> [<a href="https://arxiv.org/pdf/2007.00741">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1021/acsphotonics.0c01051">10.1021/acsphotonics.0c01051 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Deep learning-based holographic polarization microscopy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Liu%2C+T">Tairan Liu</a>, <a href="/search/physics?searchtype=author&query=de+Haan%2C+K">Kevin de Haan</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Rivenson%2C+Y">Yair Rivenson</a>, <a href="/search/physics?searchtype=author&query=Luo%2C+Y">Yi Luo</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+H">Hongda Wang</a>, <a href="/search/physics?searchtype=author&query=Karalli%2C+D">David Karalli</a>, <a href="/search/physics?searchtype=author&query=Fu%2C+H">Hongxiang Fu</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yibo Zhang</a>, <a href="/search/physics?searchtype=author&query=FitzGerald%2C+J">John FitzGerald</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2007.00741v1-abstract-short" style="display: inline;"> Polarized light microscopy provides high contrast to birefringent specimen and is widely used as a diagnostic tool in pathology. However, polarization microscopy systems typically operate by analyzing images collected from two or more light paths in different states of polarization, which lead to relatively complex optical designs, high system costs or experienced technicians being required. Here,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.00741v1-abstract-full').style.display = 'inline'; document.getElementById('2007.00741v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2007.00741v1-abstract-full" style="display: none;"> Polarized light microscopy provides high contrast to birefringent specimen and is widely used as a diagnostic tool in pathology. 
However, polarization microscopy systems typically operate by analyzing images collected from two or more light paths in different states of polarization, which lead to relatively complex optical designs, high system costs or experienced technicians being required. Here, we present a deep learning-based holographic polarization microscope that is capable of obtaining quantitative birefringence retardance and orientation information of specimen from a phase recovered hologram, while only requiring the addition of one polarizer/analyzer pair to an existing holographic imaging system. Using a deep neural network, the reconstructed holographic images from a single state of polarization can be transformed into images equivalent to those captured using a single-shot computational polarized light microscope (SCPLM). Our analysis shows that a trained deep neural network can extract the birefringence information using both the sample specific morphological features as well as the holographic amplitude and phase distribution. To demonstrate the efficacy of this method, we tested it by imaging various birefringent samples including e.g., monosodium urate (MSU) and triamcinolone acetonide (TCA) crystals. Our method achieves similar results to SCPLM both qualitatively and quantitatively, and due to its simpler optical design and significantly larger field-of-view, this method has the potential to expand the access to polarization microscopy and its use for medical diagnosis in resource limited settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.00741v1-abstract-full').style.display = 'none'; document.getElementById('2007.00741v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2020. 
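<p class="is-size-7 mathjax"> For reference, the retardance recovered by such a network is related to the specimen's birefringence by the standard expression $$\delta = \frac{2\pi\, \Delta n\, t}{\lambda},$$ where $\Delta n$ is the birefringence, $t$ the specimen thickness and $\lambda$ the illumination wavelength; together with the slow-axis orientation $\theta$, these two quantities fully parameterize a linear retarder. This is textbook polarization optics, not a relation specific to this paper. </p>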
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 8 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ACS Photonics (2020) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2001.10695">arXiv:2001.10695</a> <span> [<a href="https://arxiv.org/pdf/2001.10695">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41377-020-00358-9">10.1038/s41377-020-00358-9 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Early-detection and classification of live bacteria using time-lapse coherent imaging and deep learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Wang%2C+H">Hongda Wang</a>, <a href="/search/physics?searchtype=author&query=Koydemir%2C+H+C">Hatice Ceylan Koydemir</a>, <a href="/search/physics?searchtype=author&query=Qiu%2C+Y">Yunzhe Qiu</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yibo Zhang</a>, <a href="/search/physics?searchtype=author&query=Jin%2C+Y">Yiyin Jin</a>, <a href="/search/physics?searchtype=author&query=Tok%2C+S">Sabiha Tok</a>, <a href="/search/physics?searchtype=author&query=Yilmaz%2C+E+C">Enis Cagatay Yilmaz</a>, <a href="/search/physics?searchtype=author&query=Gumustekin%2C+E">Esin Gumustekin</a>, <a href="/search/physics?searchtype=author&query=Rivenson%2C+Y">Yair Rivenson</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2001.10695v1-abstract-short" style="display: inline;"> We present a computational live bacteria detection system that periodically captures coherent microscopy images of bacterial growth inside a 60 mm diameter agar-plate and analyzes these time-lapsed holograms using deep neural networks for rapid detection of bacterial growth and classification of the corresponding species. 
The performance of our system was demonstrated by rapid detection of Escheri… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.10695v1-abstract-full').style.display = 'inline'; document.getElementById('2001.10695v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2001.10695v1-abstract-full" style="display: none;"> We present a computational live bacteria detection system that periodically captures coherent microscopy images of bacterial growth inside a 60 mm diameter agar-plate and analyzes these time-lapsed holograms using deep neural networks for rapid detection of bacterial growth and classification of the corresponding species. The performance of our system was demonstrated by rapid detection of Escherichia coli and total coliform bacteria (i.e., Klebsiella aerogenes and Klebsiella pneumoniae subsp. pneumoniae) in water samples. These results were confirmed against gold-standard culture-based results, shortening the detection time of bacterial growth by >12 h as compared to the Environmental Protection Agency (EPA)-approved analytical methods. Our experiments further confirmed that this method successfully detects 90% of bacterial colonies within 7-10 h (and >95% within 12 h) with a precision of 99.2-100%, and correctly identifies their species in 7.6-12 h with 80% accuracy. Using pre-incubation of samples in growth media, our system achieved a limit of detection (LOD) of ~1 colony forming unit (CFU)/L within 9 h of total test time. This computational bacteria detection and classification platform is highly cost-effective (~$0.6 per test) and high-throughput with a scanning speed of 24 cm2/min over the entire plate surface, making it highly suitable for integration with the existing analytical methods currently used for bacteria detection on agar plates. Powered by deep learning, this automated and cost-effective live bacteria detection platform can be transformative for a wide range of applications in microbiology by significantly reducing the detection time, also automating the identification of colonies, without labeling or the need for an expert. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.10695v1-abstract-full').style.display = 'none'; document.getElementById('2001.10695v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 January, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 6 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Light: Science & Applications (2020) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2001.06890">arXiv:2001.06890</a> <span> [<a href="https://arxiv.org/pdf/2001.06890">pdf</a>, <a href="https://arxiv.org/ps/2001.06890">ps</a>, <a href="https://arxiv.org/format/2001.06890">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Fluid Dynamics">physics.flu-dyn</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Geophysics">physics.geo-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1017/jfm.2020.79">10.1017/jfm.2020.79 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> An efficient cellular flow model for cohesive particle flocculation in turbulence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Zhao%2C+K">K. Zhao</a>, <a href="/search/physics?searchtype=author&query=Vowinckel%2C+B">B. Vowinckel</a>, <a href="/search/physics?searchtype=author&query=Hsu%2C+T+-">T. -J. Hsu</a>, <a href="/search/physics?searchtype=author&query=K%C3%B6llner%2C+T">T. Köllner</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">B. Bai</a>, <a href="/search/physics?searchtype=author&query=Meiburg%2C+E">E. Meiburg</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2001.06890v1-abstract-short" style="display: inline;"> We propose a one-way coupled model that tracks individual primary particles in a conceptually simple cellular flow setup to predict flocculation in turbulence. This computationally efficient model accounts for Stokes drag, lubrication, cohesive and direct contact forces on the primary spherical particles and allows for a systematic simulation campaign that yields the transient mean floc size as a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.06890v1-abstract-full').style.display = 'inline'; document.getElementById('2001.06890v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2001.06890v1-abstract-full" style="display: none;"> We propose a one-way coupled model that tracks individual primary particles in a conceptually simple cellular flow setup to predict flocculation in turbulence. This computationally efficient model accounts for Stokes drag, lubrication, cohesive and direct contact forces on the primary spherical particles and allows for a systematic simulation campaign that yields the transient mean floc size as a function of the governing dimensionless parameters. The simulations reproduce the growth of the cohesive flocs with time, and the emergence of a log-normal equilibrium distribution governed by the balance of aggregation and breakage.
Flocculation proceeds most rapidly when the Stokes number of the primary particles is \textit{O}(1). Results from this simple computational model are consistent with experimental observations, thus allowing us to propose a new analytical flocculation model that yields improved agreement with experimental data, especially during the transient stages. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.06890v1-abstract-full').style.display = 'none'; document.getElementById('2001.06890v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 January, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for JFM Rapids, 10 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1802.02888">arXiv:1802.02888</a> <span> [<a href="https://arxiv.org/pdf/1802.02888">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> </div> <p class="title is-5 mathjax"> Spatial mapping and analysis of aerosols during a forest fire using computational mobile microscopy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Wu%2C+Y">Yichen Wu</a>, <a href="/search/physics?searchtype=author&query=Shiledar%2C+A">Ashutosh Shiledar</a>, <a href="/search/physics?searchtype=author&query=Luo%2C+Y">Yi Luo</a>, <a href="/search/physics?searchtype=author&query=Wong%2C+J">Jeffrey Wong</a>, <a href="/search/physics?searchtype=author&query=Chen%2C+C">Cheng Chen</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bijie Bai</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+Y">Yibo Zhang</a>, <a href="/search/physics?searchtype=author&query=Tamamitsu%2C+M">Miu Tamamitsu</a>, <a href="/search/physics?searchtype=author&query=Ozcan%2C+A">Aydogan Ozcan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1802.02888v1-abstract-short" style="display: inline;"> Forest fires are a major source of particulate matter (PM) air pollution on a global scale. The composition and impact of PM are typically studied using only laboratory instruments and extrapolated to real fire events owing to a lack of analytical techniques suitable for field-settings. To address this and similar field test challenges, we developed a mobile-microscopy and machine-learning-based a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.02888v1-abstract-full').style.display = 'inline'; document.getElementById('1802.02888v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1802.02888v1-abstract-full" style="display: none;"> Forest fires are a major source of particulate matter (PM) air pollution on a global scale. 
The composition and impact of PM are typically studied using only laboratory instruments and extrapolated to real fire events owing to a lack of analytical techniques suitable for field-settings. To address this and similar field test challenges, we developed a mobile-microscopy and machine-learning-based air quality monitoring platform called c-Air, which can perform air sampling and microscopic analysis of aerosols in an integrated portable device. We tested its performance for PM sizing and morphological analysis during a recent forest fire event in La Tuna Canyon Park by spatially mapping the PM. The result shows that with decreasing distance to the fire site, the PM concentration increases dramatically, especially for particles smaller than 2 microns. Image analysis from the c-Air portable device also shows that the increased PM is comparatively strongly absorbing and asymmetric, with an aspect ratio of 0.5-0.7. These PM features indicate that a major portion of the PM may be open-flame-combustion-generated element carbon soot-type particles. This initial small-scale experiment shows that c-Air has some potential for forest fire monitoring. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.02888v1-abstract-full').style.display = 'none'; document.getElementById('1802.02888v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 78A10 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1801.07849">arXiv:1801.07849</a> <span> [<a href="https://arxiv.org/pdf/1801.07849">pdf</a>, <a href="https://arxiv.org/format/1801.07849">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1364/OE.26.018644">10.1364/OE.26.018644 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Probing vectorial near field of light: imaging theory and design principles of nanoprobes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Sun%2C+L">Lin Sun</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Benfeng Bai</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+J">Jia Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1801.07849v3-abstract-short" style="display: inline;"> Near-field microscopy is widely used for characterizing electromagnetic fields at nanoscale, where nanoprobes afford the opportunity to extract subwavelength optical quantities, including the amplitude, phase, polarization and chirality. 
However, owing to the complexity of various nanoprobes, a general and intuitive theory is highly needed to assess the vectorial field response of the nanoprobes a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1801.07849v3-abstract-full').style.display = 'inline'; document.getElementById('1801.07849v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1801.07849v3-abstract-full" style="display: none;"> Near-field microscopy is widely used for characterizing electromagnetic fields at nanoscale, where nanoprobes afford the opportunity to extract subwavelength optical quantities, including the amplitude, phase, polarization and chirality. However, owing to the complexity of various nanoprobes, a general and intuitive theory is highly needed to assess the vectorial field response of the nanoprobes and interpret the mechanism of the probe-field interaction. Here, we develop a general imaging theory based on the reciprocity of electromagnetism and multipole expansion analysis. The proposed theory closely resembles the multipolar Hamiltonian for light-matter interaction energy, revealing the coupling mechanism of the probe-field interaction. Based on this theory, we introduce a new paradigm for the design of functional nanoprobes by analyzing the reciprocal dipole moments, and establish effective design principles for the imaging of vectorial near fields. Moreover, we numerically analyze the responses of two typical probes, which can quantitatively reproduce and well explain the experimental results of previously reported measurements of optical magnetism and transverse spin angular momentum. Our work provides a powerful tool for the design and analysis of new functional probes that may enable the probing of various physical quantities of the vectorial near field. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1801.07849v3-abstract-full').style.display = 'none'; document.getElementById('1801.07849v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 January, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2018. 
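<p class="is-size-7 mathjax"> The "multipolar Hamiltonian for light-matter interaction energy" invoked above has, up to convention-dependent prefactors on the quadrupole term, the familiar form $$H_\mathrm{int} = -\mathbf{p}\cdot\mathbf{E}(\mathbf{r}_0) - \mathbf{m}\cdot\mathbf{B}(\mathbf{r}_0) - \tfrac{1}{2}\,\overleftrightarrow{Q} : \nabla\mathbf{E}(\mathbf{r}_0) - \cdots,$$ with $\mathbf{p}$, $\mathbf{m}$ and $\overleftrightarrow{Q}$ the electric dipole, magnetic dipole and electric quadrupole moments of the probe and the fields evaluated at the probe position $\mathbf{r}_0$; this is the standard textbook expression, quoted here for orientation rather than taken from the paper. </p>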
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 5 figures, revised contents and styles of references</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1708.05350">arXiv:1708.05350</a> <span> [<a href="https://arxiv.org/pdf/1708.05350">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Engineering, Finance, and Science">cs.CE</span> </div> </div> <p class="title is-5 mathjax"> Application of Hilbert-Huang decomposition to reduce noise and characterize for NMR FID signal of proton precession magnetometer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Liu%2C+H">Huan Liu</a>, <a href="/search/physics?searchtype=author&query=Dong%2C+H">Haobin Dong</a>, <a href="/search/physics?searchtype=author&query=Liu%2C+Z">Zheng Liu</a>, <a href="/search/physics?searchtype=author&query=Ge%2C+J">Jian Ge</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bingjie Bai</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+C">Cheng Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1708.05350v1-abstract-short" style="display: inline;"> The parameters in a nuclear magnetic resonance (NMR) free induction decay (FID) signal contain information that is useful in magnetic field measurement, magnetic resonance sounding (MRS) and other related applications. A real time sampled FID signal is well modeled as a finite mixture of exponential sequences plus noise. We propose to use the Hilbert-Huang Transform (HHT) for noise reduction and c… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1708.05350v1-abstract-full').style.display = 'inline'; document.getElementById('1708.05350v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1708.05350v1-abstract-full" style="display: none;"> The parameters in a nuclear magnetic resonance (NMR) free induction decay (FID) signal contain information that is useful in magnetic field measurement, magnetic resonance sounding (MRS) and other related applications. A real time sampled FID signal is well modeled as a finite mixture of exponential sequences plus noise. We propose to use the Hilbert-Huang Transform (HHT) for noise reduction and characterization, where the generalized Hilbert-Huang represents a way to decompose a signal into so-called intrinsic mode function (IMF) along with a trend, and obtain instantaneous frequency data. Moreover, the HHT for an FID signal's feature analysis is applied for the first time. First, acquiring the actual untuned FID signal by a developed prototype of proton magnetometer, and then the empirical mode decomposition (EMD) is performed to decompose the noise and original FID. Finally, the HHT is applied to the obtained IMFs to extract the Hilbert energy spectrum, to indicate the energy distribution of the signal on the frequency axis. 
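<p class="is-size-7"> The abstract models the FID as a finite mixture of decaying exponential oscillations plus noise; the sketch below synthesizes one such component and recovers its instantaneous frequency from the analytic signal, the Hilbert-transform step that the HHT applies to each IMF after EMD. The parameter values are illustrative only and the EMD decomposition itself is omitted. </p>
<pre><code>
# Synthesize a single-component FID s(t) = A*exp(-t/T2)*sin(2*pi*f0*t) + noise and
# recover its instantaneous frequency from the analytic signal (Hilbert transform).
# Parameter values are illustrative; the EMD decomposition step is omitted here.
import numpy as np
from scipy.signal import hilbert

fs = 10_000.0                      # sampling rate, Hz
t = np.arange(0, 1.0, 1 / fs)
f0, T2, A = 2000.0, 0.3, 1.0       # precession-like frequency, decay time, amplitude
fid = A * np.exp(-t / T2) * np.sin(2 * np.pi * f0 * t)
fid += 0.01 * np.random.default_rng(1).normal(size=t.size)

analytic = hilbert(fid)                      # analytic signal of the FID
phase = np.unwrap(np.angle(analytic))        # instantaneous phase
inst_freq = np.diff(phase) * fs / (2 * np.pi)
print(f"median instantaneous frequency: {np.median(inst_freq):.1f} Hz (true {f0} Hz)")
</code></pre>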
By theory analysis and the testing of an actual FID signal, the results show that, compared with general noise reduction methods such as autocorrelation and singular value decomposition (SVD), the proposed method can further suppress the interfering signals effectively and can obtain different components of the FID signal, which can be used to identify magnetic anomalies, the existence of groundwater, etc. This is a very important property since it can be exploited to separate the FID signal from noise and to estimate exponential sequence parameters of the FID signal. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1708.05350v1-abstract-full').style.display = 'none'; document.getElementById('1708.05350v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 6 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1705.03621">arXiv:1705.03621</a> <span> [<a href="https://arxiv.org/pdf/1705.03621">pdf</a>, <a href="https://arxiv.org/format/1705.03621">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1364/JOSAB.34.002081">10.1364/JOSAB.34.002081 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Two-photon superbunching of pseudothermal light in a Hanbury Brown-Twiss interferometer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bin Bai</a>, <a href="/search/physics?searchtype=author&query=Liu%2C+J">Jianbin Liu</a>, <a href="/search/physics?searchtype=author&query=Zhou%2C+Y">Yu Zhou</a>, <a href="/search/physics?searchtype=author&query=Zheng%2C+H">Huaibin Zheng</a>, <a href="/search/physics?searchtype=author&query=Chen%2C+H">Hui Chen</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+S">Songlin Zhang</a>, <a href="/search/physics?searchtype=author&query=He%2C+Y">Yuchen He</a>, <a href="/search/physics?searchtype=author&query=Li%2C+F">Fuli Li</a>, <a href="/search/physics?searchtype=author&query=Xu%2C+Z">Zhuo Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1705.03621v1-abstract-short" style="display: inline;"> Two-photon superbunching of pseudothermal light is observed with single-mode continuous-wave laser light in a linear optical system. By adding more two-photon paths via three rotating ground glasses, g(2)(0) = 7.10 is experimentally observed.
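<p class="is-size-7 mathjax"> The figures in this entry are consistent with each additional independent pseudothermal modulation (rotating ground glass) multiplying the $n$-th order degree of coherence by $n!$, i.e. $g^{(n)}(0) = (n!)^N$ for $N$ ground glasses: the ideal value behind the measured $g^{(2)}(0) = 7.10$ with three glasses is $2^3 = 8$, and the five-glass values of 32 and 7776 quoted in the full abstract below are $2^5$ and $6^5$. This scaling is inferred from the quoted numbers, not stated as a formula in the abstract; a quick check is given below. </p>
<pre><code>
# Quick check that the quoted figures follow g^(n)(0) = (n!)^N for N independent
# pseudothermal modulations (rotating ground glasses); the scaling is inferred
# from the numbers in the abstract rather than quoted from it as a formula.
from math import factorial

def g_n(n, N):
    return factorial(n) ** N

print(g_n(2, 3))   # 8    -- ideal value behind the measured g(2)(0) = 7.10 with 3 glasses
print(g_n(2, 5))   # 32   -- second-order figure quoted for 5 glasses
print(g_n(3, 5))   # 7776 -- third-order figure quoted for 5 glasses
</code></pre>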
The second-order temporal coherence function of superbunching pseudothermal light is theoretically and experimentally studied in detail. It is predicted that… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1705.03621v1-abstract-full').style.display = 'inline'; document.getElementById('1705.03621v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1705.03621v1-abstract-full" style="display: none;"> Two-photon superbunching of pseudothermal light is observed with single-mode continuous-wave laser light in a linear optical system. By adding more two-photon paths via three rotating ground glasses,g(2)(0) = 7.10 is experimentally observed. The second-order temporal coherence function of superbunching pseudothermal light is theoretically and experimentally studied in detail. It is predicted that the degree of coherence of light can be increased dramatically by adding more multi-photon paths. For instance, the degree of the second- and third-order coherence of the superbunching pseudothermal light with five rotating ground glasses can reach 32 and 7776, respectively. The results are helpful to understand the physics of superbunching and to improve the visibility of thermal light ghost imaging. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1705.03621v1-abstract-full').style.display = 'none'; document.getElementById('1705.03621v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 May, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Journal of the Optical Society of America B 34, 2081 (2017) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1702.08792">arXiv:1702.08792</a> <span> [<a href="https://arxiv.org/pdf/1702.08792">pdf</a>, <a href="https://arxiv.org/format/1702.08792">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1103/PhysRevA.95.053809">10.1103/PhysRevA.95.053809 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Superbunching pseudothermal light </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Zhou%2C+Y">Yu Zhou</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bin Bai</a>, <a href="/search/physics?searchtype=author&query=Zheng%2C+H">Huaibin Zheng</a>, <a href="/search/physics?searchtype=author&query=Chen%2C+H">Hui Chen</a>, <a href="/search/physics?searchtype=author&query=Liu%2C+J">Jianbin Liu</a>, <a href="/search/physics?searchtype=author&query=Li%2C+F">Fu-li Li</a>, <a href="/search/physics?searchtype=author&query=Xu%2C+Z">Zhuo Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
arXiv:1702.08792 [pdf, other] quant-ph physics.optics
doi: 10.1103/PhysRevA.95.053809
Superbunching pseudothermal light
Authors: Yu Zhou, Bin Bai, Huaibin Zheng, Hui Chen, Jianbin Liu, Fu-li Li, Zhuo Xu
Abstract: A novel and simple superbunching pseudothermal light source is introduced, based on common instruments such as a laser, a lens, a pinhole, and ground glass. $g^{(2)}(0) = 3.66 \pm 0.02$ is observed in the suggested scheme by employing two rotating ground glasses. Quantum and classical theories are employed to interpret the observed superbunching effect. It is predicted that $g^{(2)}(0)$ can reach $2^N$ if $N$ rotating ground glasses are employed. These results are helpful for understanding the physics of superbunching. The proposed superbunching pseudothermal light may serve as a new type of light source for studying the second- and higher-order coherence of light, and has potential application in improving the visibility of thermal light ghost imaging.
Submitted 28 February, 2017; originally announced February 2017.
Comments: 9 pages, 5 figures. Submitted for publication and comments are welcome
Journal ref: Phys. Rev. A 95, 053809 (2017)
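The $2^N$ prediction can be sanity-checked numerically under a simple assumption that is mine, not the paper's: treat the intensity behind $N$ independent rotating ground glasses as a product of $N$ independent negative-exponential (thermal) intensities and compute $g^{(2)}(0) = \langle I^2\rangle / \langle I\rangle^2$.

```python
# Toy Monte Carlo check of the 2^N superbunching scaling, assuming (as a
# modeling shortcut, not the paper's derivation) that the intensity after N
# ground glasses is a product of N independent unit-mean exponential intensities.
import numpy as np

rng = np.random.default_rng(0)
samples = 1_000_000
for n_glasses in (1, 2, 3):
    intensity = np.prod(rng.exponential(1.0, size=(n_glasses, samples)), axis=0)
    g2 = np.mean(intensity**2) / np.mean(intensity)**2
    print(f"N = {n_glasses}: g2(0) ~ {g2:.2f}  (prediction 2^N = {2**n_glasses})")
```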
arXiv:1612.07120 [pdf, ps, other] cs.CV physics.optics
doi: 10.1016/j.ijleo.2017.08.057
Imaging around corners with single-pixel detector by computational ghost imaging
Authors: Bin Bai, Jianbin Liu, Yu Zhou, Songlin Zhang, Yuchen He, Zhuo Xu
Abstract: We have designed a single-pixel camera that images around corners based on computational ghost imaging. It can obtain the image of an object when the camera cannot look at the object directly. Our imaging system exploits the fact that a bucket detector in a ghost imaging setup has no spatial resolution capability. A series of experiments has been designed to confirm our predictions. This camera has potential applications for imaging around corners or in other environments where the object cannot be observed directly.
Submitted 8 December, 2016; originally announced December 2016.
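As background on how such a single-pixel reconstruction works, the sketch below correlates known illumination patterns with a bucket-detector signal, the standard computational ghost imaging estimator; the scene, resolution, and pattern count are placeholders, not the experimental parameters of the paper above.

```python
# Minimal computational ghost imaging reconstruction:
#   G(x, y) = <B * I(x, y)> - <B> <I(x, y)>,
# where I are known illumination patterns and B is the bucket (single-pixel) signal.
# Object, resolution, and pattern count are hypothetical placeholders.
import numpy as np

rng = np.random.default_rng(1)
h, w, n_patterns = 32, 32, 5000

obj = np.zeros((h, w))                          # hypothetical binary object
obj[10:22, 12:20] = 1.0

patterns = rng.random((n_patterns, h, w))       # known speckle / DMD patterns
bucket = np.einsum("khw,hw->k", patterns, obj)  # total light collected per shot

ghost = (np.einsum("k,khw->hw", bucket, patterns) / n_patterns
         - bucket.mean() * patterns.mean(axis=0))
print("correlation with ground truth:",
      np.corrcoef(ghost.ravel(), obj.ravel())[0, 1])
```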
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1612.07120v1-abstract-full').style.display = 'none'; document.getElementById('1612.07120v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 December, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2016. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1608.06355">arXiv:1608.06355</a> <span> [<a href="https://arxiv.org/pdf/1608.06355">pdf</a>, <a href="https://arxiv.org/ps/1608.06355">ps</a>, <a href="https://arxiv.org/format/1608.06355">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1364/AO.55.007497">10.1364/AO.55.007497 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Design considerations of high-performance InGaAs/InP single-photon avalanche diodes for quantum key distribution </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/physics?searchtype=author&query=Ma%2C+J">Jian Ma</a>, <a href="/search/physics?searchtype=author&query=Bai%2C+B">Bing Bai</a>, <a href="/search/physics?searchtype=author&query=Wang%2C+L">Liu-Jun Wang</a>, <a href="/search/physics?searchtype=author&query=Tong%2C+C">Cun-Zhu Tong</a>, <a href="/search/physics?searchtype=author&query=Jin%2C+G">Ge Jin</a>, <a href="/search/physics?searchtype=author&query=Zhang%2C+J">Jun Zhang</a>, <a href="/search/physics?searchtype=author&query=Pan%2C+J">Jian-Wei Pan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1608.06355v1-abstract-short" style="display: inline;"> InGaAs/InP single-photon avalanche diodes (SPADs) are widely used in practical applications requiring near-infrared photon counting such as quantum key distribution (QKD). Photon detection efficiency and dark count rate are the intrinsic parameters of InGaAs/InP SPADs, due to the fact that their performances cannot be improved using different quenching electronics given the same operation conditio… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1608.06355v1-abstract-full').style.display = 'inline'; document.getElementById('1608.06355v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1608.06355v1-abstract-full" style="display: none;"> InGaAs/InP single-photon avalanche diodes (SPADs) are widely used in practical applications requiring near-infrared photon counting such as quantum key distribution (QKD). Photon detection efficiency and dark count rate are the intrinsic parameters of InGaAs/InP SPADs, due to the fact that their performances cannot be improved using different quenching electronics given the same operation conditions. 
After modeling these parameters and developing a simulation platform for InGaAs/InP SPADs, we investigate the design and optimization of the semiconductor structure. Photon detection efficiency and dark count rate depend strongly on the absorption layer thickness, the multiplication layer thickness, the excess bias voltage, and the temperature. By evaluating the decoy-state QKD performance, these design and operating variables can be globally optimized. Such optimization from the perspective of a specific application provides an effective approach to designing high-performance InGaAs/InP SPADs.
Submitted 22 August, 2016; originally announced August 2016.
Comments: 6 pages, 7 figures. Accepted for publication in Applied Optics
Journal ref: Appl. Opt. 55(27), 7497-7502 (2016)
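The "global optimization" described above is, in spirit, a search over a few device and operating variables against an application-level figure of merit. The sketch below only illustrates that structure; the figure_of_merit function is a made-up placeholder and does not reproduce the paper's SPAD or decoy-state QKD model.

```python
# Schematic grid search over SPAD design/operating variables against a
# figure of merit. The model below is a hypothetical placeholder, not the
# SPAD + decoy-state QKD simulation of the paper above.
import itertools

def figure_of_merit(absorption_um, multiplication_um, excess_bias_v, temp_k):
    """Placeholder score: rises with efficiency, falls with dark counts."""
    efficiency = min(1.0, 0.2 * absorption_um) * (1 - 1 / (1 + excess_bias_v))
    dark_counts = 1e3 * multiplication_um * 2.0 ** ((temp_k - 223) / 10)
    return efficiency / (1 + 1e-4 * dark_counts)

grid = itertools.product(
    [1.0, 1.5, 2.0],    # absorption layer thickness (um)
    [0.6, 0.9, 1.2],    # multiplication layer thickness (um)
    [2.0, 4.0, 6.0],    # excess bias (V)
    [223, 233, 243],    # temperature (K)
)
best = max(grid, key=lambda p: figure_of_merit(*p))
print("best (absorption, multiplication, excess bias, T):", best)
```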
arXiv:1605.04424 [pdf, ps, other] physics.optics
Hanbury-Brown and Twiss effect without quantum interference in photon counting regime
Authors: Bin Bai, Yu Zhou, Hui Chen, Huaibin Zheng, Jianbin Liu, Ruifeng Liu, Yunlong Wang, Zhuo Xu, Fuli Li
Abstract: Usually, the HBT effect can be interpreted equally well by classical (intensity fluctuation correlation) and quantum (interference of two-photon probability amplitudes) theories. In this manuscript, we report a deliberately designed experiment in which two chaotic light beams have the same intensity fluctuations but mutually orthogonal polarizations, so that there is no interference of two-photon probability amplitudes. Classical and quantum theories give different predictions as to whether an HBT (photon bunching) effect should appear in this experiment. The experimental results are used to test the two predictions. In the end, both the temporal and spatial HBT effects are observed.
Submitted 14 May, 2016; originally announced May 2016.
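For context, the classical "intensity fluctuation correlation" picture named in this abstract is the textbook relation below (standard background, not taken from the paper itself).

```latex
% Standard classical (intensity-fluctuation) form of the HBT correlation;
% textbook background for the abstract above, not the paper's derivation.
\[
  g^{(2)}(\tau)
  = \frac{\langle I(t)\, I(t+\tau)\rangle}{\langle I(t)\rangle^{2}}
  = 1 + \frac{\langle \Delta I(t)\, \Delta I(t+\tau)\rangle}{\langle I(t)\rangle^{2}},
  \qquad \Delta I = I - \langle I \rangle .
\]
```

Classically, bunching is therefore expected whenever the intensity fluctuations are correlated, independent of whether two-photon probability amplitudes can interfere.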
arXiv:1502.00753 [pdf] physics.optics
Fano-resonance boosted cascaded field enhancement in a plasmonic nanoparticle-in-cavity nanoantenna array and its SERS application
Authors: Zhendong Zhu, Benfeng Bai, Oubo You, Qunqing Li, Shoushan Fan
Abstract: Cascaded optical field enhancement (CFE) can be realized in specially designed multiscale plasmonic nanostructures, where the generation of an extremely strong field in a nanoscale volume is crucial for many applications, for example, surface-enhanced Raman spectroscopy (SERS). Here, we propose a strategy for realizing a high-quality plasmonic nanoparticle-in-cavity (PIC) nanoantenna array, where strong coupling between a nanoparticle dark mode and a high-order nanocavity bright mode produces a Fano resonance at a target wavelength. The Fano resonance effectively boosts the CFE in the PIC, with a field enhancement factor of up to 5×10^2. A cost-effective and reliable nanofabrication method based on room-temperature nanoimprint lithography is developed to manufacture high-quality PIC arrays. This technique guarantees that only one gold nanoparticle is generated at the bottom of each nanocavity, which is crucial for the expected CFE. As a demonstration of the performance and applications of the PIC array, it is used as an active SERS substrate for detecting 4-aminothiophenol molecules. A SERS enhancement factor of up to 2×10^7 is obtained experimentally, verifying the field enhancement and the potential of this device.
Submitted 3 February, 2015; originally announced February 2015.
Comments: 19 pages and 4 figures
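As background for the two enhancement figures quoted above, SERS enhancement factors are commonly estimated with the electromagnetic "fourth-power" approximation shown below (a standard rule of thumb, not a claim from the abstract); measured average enhancement factors are typically lower than the ideal hot-spot estimate because only a fraction of the probed molecules sit in the most strongly enhanced region.

```latex
% Common electromagnetic estimate of the SERS enhancement factor
% (the "|E|^4 approximation"); standard background, not from the paper above.
\[
  \mathrm{EF}_{\mathrm{SERS}} \approx
  \left|\frac{E(\omega_{\mathrm{exc}})}{E_{0}}\right|^{2}
  \left|\frac{E(\omega_{\mathrm{Raman}})}{E_{0}}\right|^{2}
  \approx \left|\frac{E}{E_{0}}\right|^{4}.
\]
```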