Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;45 of 45 results for author: <span class="mathjax">Harris, P</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Harris%2C+P">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Harris, P"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Harris%2C+P&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Harris, P"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09851">arXiv:2411.09851</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09851">pdf</a>, <a href="https://arxiv.org/format/2411.09851">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Analysis, Statistics and Probability">physics.data-an</span> </div> </div> <p class="title is-5 mathjax"> SymbolFit: Automatic Parametric Modeling with Symbolic Regression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tsoi%2C+H+F">Ho Fung Tsoi</a>, <a href="/search/cs?searchtype=author&amp;query=Rankin%2C+D">Dylan Rankin</a>, <a href="/search/cs?searchtype=author&amp;query=Caillol%2C+C">Cecile Caillol</a>, <a href="/search/cs?searchtype=author&amp;query=Cranmer%2C+M">Miles Cranmer</a>, <a href="/search/cs?searchtype=author&amp;query=Dasu%2C+S">Sridhara Dasu</a>, <a href="/search/cs?searchtype=author&amp;query=Duarte%2C+J">Javier Duarte</a>, <a href="/search/cs?searchtype=author&amp;query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&amp;query=Lipeles%2C+E">Elliot Lipeles</a>, <a href="/search/cs?searchtype=author&amp;query=Loncar%2C+V">Vladimir Loncar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09851v1-abstract-short" style="display: inline;"> We introduce SymbolFit, a framework that automates parametric modeling by using symbolic regression to perform a machine-search for functions that fit the data, while simultaneously providing uncertainty estimates in a single run. Traditionally, constructing a parametric model to accurately describe binned data has been a manual and iterative process, requiring an adequate functional form to be de&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09851v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09851v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09851v1-abstract-full" style="display: none;"> We introduce SymbolFit, a framework that automates parametric modeling by using symbolic regression to perform a machine-search for functions that fit the data, while simultaneously providing uncertainty estimates in a single run. 
Traditionally, constructing a parametric model to accurately describe binned data has been a manual and iterative process, requiring an adequate functional form to be determined before the fit can be performed. The main challenge arises when the appropriate functional forms cannot be derived from first principles, especially when there is no underlying true closed-form function for the distribution. In this work, we address this problem by utilizing symbolic regression, a machine learning technique that explores a vast space of candidate functions without needing a predefined functional form, treating the functional form itself as a trainable parameter. Our approach is demonstrated in data analysis applications in high-energy physics experiments at the CERN Large Hadron Collider (LHC). We demonstrate its effectiveness and efficiency using five real proton-proton collision datasets from new physics searches at the LHC, namely the background modeling in resonance searches for high-mass dijet, trijet, paired-dijet, diphoton, and dimuon events. We also validate the framework using several toy datasets with one and more variables. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09851v1-abstract-full').style.display = 'none'; document.getElementById('2411.09851v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">53 pages, 35 figures. Under review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.13947">arXiv:2410.13947</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.13947">pdf</a>, <a href="https://arxiv.org/format/2410.13947">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> MACK: Mismodeling Addressed with Contrastive Knowledge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sheldon%2C+L+R">Liam Rankin Sheldon</a>, <a href="/search/cs?searchtype=author&amp;query=Rankin%2C+D+S">Dylan Sheldon Rankin</a>, <a href="/search/cs?searchtype=author&amp;query=Harris%2C+P">Philip Harris</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.13947v1-abstract-short" style="display: inline;"> The use of machine learning methods in high energy physics typically relies on large volumes of precise simulation for training. As machine learning models become more complex they can become increasingly sensitive to differences between this simulation and the real data collected by experiments. 
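
The entry above describes fitting binned spectra with symbolic regression instead of a hand-chosen functional form. As a purely illustrative sketch of that idea (not the SymbolFit code; PySR is only assumed here as a convenient symbolic-regression engine), the following fits a toy falling spectrum and prints the best expression found:

```python
# Illustrative only: symbolic regression on a toy falling "background" spectrum.
import numpy as np
from pysr import PySRRegressor  # assumes PySR (and its Julia backend) is installed

rng = np.random.default_rng(0)
x = np.linspace(0.1, 1.0, 60).reshape(-1, 1)                          # scaled mass bins
y = rng.poisson(1e4 * (1 - x[:, 0])**6 / x[:, 0]**4).astype(float)   # binned counts

model = PySRRegressor(
    niterations=40,
    binary_operators=["+", "-", "*", "/", "^"],
    unary_operators=["exp", "log"],
)
model.fit(x, y)          # searches over candidate closed-form expressions
print(model.get_best())  # best expression found, with its complexity and score
```

The key difference from a conventional fit is that the functional form itself is part of the search, which is what the paper exploits to automate background modeling.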

2. arXiv:2410.13947 [pdf, other]  (hep-ph, cs.LG, hep-ex)
   MACK: Mismodeling Addressed with Contrastive Knowledge
   Authors: Liam Rankin Sheldon, Dylan Sheldon Rankin, Philip Harris
   Abstract: The use of machine learning methods in high energy physics typically relies on large volumes of precise simulation for training. As machine learning models become more complex they can become increasingly sensitive to differences between this simulation and the real data collected by experiments. We present a generic methodology based on contrastive learning which is able to greatly mitigate this negative effect. Crucially, the method does not require prior knowledge of the specifics of the mismodeling. While we demonstrate the efficacy of this technique using the task of jet-tagging at the Large Hadron Collider, it is applicable to a wide array of different tasks both in and out of the field of high energy physics.
   Submitted 17 October, 2024; originally announced October 2024.
   Comments: 13 pages, 4 figures, Submission to SciPost
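
The core ingredient named in the abstract above is contrastive learning. As a generic, self-contained illustration (not the paper's code; the pairing of "two views of the same object" is only an assumed example), here is a minimal NT-Xent-style contrastive loss in NumPy:

```python
# Illustrative only: NT-Xent contrastive loss over two views of the same objects.
import numpy as np

def nt_xent(z1, z2, temperature=0.1):
    """z1, z2: (N, D) embeddings of two views of the same N objects."""
    z = np.concatenate([z1, z2], axis=0)
    z = z / np.linalg.norm(z, axis=1, keepdims=True)   # L2-normalize embeddings
    sim = z @ z.T / temperature                         # scaled cosine similarities
    np.fill_diagonal(sim, -np.inf)                      # exclude self-similarity
    n = z1.shape[0]
    pos = np.concatenate([np.arange(n, 2 * n), np.arange(n)])  # index of each positive
    log_prob = sim - np.log(np.exp(sim).sum(axis=1, keepdims=True))
    return -log_prob[np.arange(2 * n), pos].mean()

rng = np.random.default_rng(1)
z_view_a, z_view_b = rng.normal(size=(8, 16)), rng.normal(size=(8, 16))
print(nt_xent(z_view_a, z_view_b))
```

Minimizing such a loss pulls matched pairs together in embedding space, which is the mechanism a mismodeling-robust representation can be built on.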
Bell</a>, <a href="/search/cs?searchtype=author&amp;query=Bumgardner%2C+C">Cody Bumgardner</a>, <a href="/search/cs?searchtype=author&amp;query=Campion%2C+T">Thomas Campion</a>, <a href="/search/cs?searchtype=author&amp;query=Castro%2C+M">Mario Castro</a>, <a href="/search/cs?searchtype=author&amp;query=Cimino%2C+J+J">James J. Cimino</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+I+G">I. Glenn Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Dorr%2C+D">David Dorr</a>, <a href="/search/cs?searchtype=author&amp;query=Elkin%2C+P+L">Peter L Elkin</a>, <a href="/search/cs?searchtype=author&amp;query=Fan%2C+J+W">Jungwei W. Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Ferris%2C+T">Todd Ferris</a>, <a href="/search/cs?searchtype=author&amp;query=Foran%2C+D+J">David J. Foran</a>, <a href="/search/cs?searchtype=author&amp;query=Hanauer%2C+D">David Hanauer</a>, <a href="/search/cs?searchtype=author&amp;query=Hogarth%2C+M">Mike Hogarth</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+K">Kun Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Kalpathy-Cramer%2C+J">Jayashree Kalpathy-Cramer</a>, <a href="/search/cs?searchtype=author&amp;query=Kandpal%2C+M">Manoj Kandpal</a>, <a href="/search/cs?searchtype=author&amp;query=Karnik%2C+N+S">Niranjan S. Karnik</a>, <a href="/search/cs?searchtype=author&amp;query=Katoch%2C+A">Avnish Katoch</a>, <a href="/search/cs?searchtype=author&amp;query=Lai%2C+A+M">Albert M. Lai</a> , et al. (32 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.12793v1-abstract-short" style="display: inline;"> This study reports a comprehensive environmental scan of the generative AI (GenAI) infrastructure in the national network for clinical and translational science across 36 institutions supported by the Clinical and Translational Science Award (CTSA) Program led by the National Center for Advancing Translational Sciences (NCATS) of the National Institutes of Health (NIH) at the United States. With t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12793v1-abstract-full').style.display = 'inline'; document.getElementById('2410.12793v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.12793v1-abstract-full" style="display: none;"> This study reports a comprehensive environmental scan of the generative AI (GenAI) infrastructure in the national network for clinical and translational science across 36 institutions supported by the Clinical and Translational Science Award (CTSA) Program led by the National Center for Advancing Translational Sciences (NCATS) of the National Institutes of Health (NIH) at the United States. With the rapid advancement of GenAI technologies, including large language models (LLMs), healthcare institutions face unprecedented opportunities and challenges. This research explores the current status of GenAI integration, focusing on stakeholder roles, governance structures, and ethical considerations by administering a survey among leaders of health institutions (i.e., representing academic medical centers and health systems) to assess the institutional readiness and approach towards GenAI adoption. 
Key findings indicate a diverse range of institutional strategies, with most organizations in the experimental phase of GenAI deployment. The study highlights significant variations in governance models, with a strong preference for centralized decision-making but notable gaps in workforce training and ethical oversight. Moreover, the results underscore the need for a more coordinated approach to GenAI governance, emphasizing collaboration among senior leaders, clinicians, information technology staff, and researchers. Our analysis also reveals concerns regarding GenAI bias, data security, and stakeholder trust, which must be addressed to ensure the ethical and effective implementation of GenAI technologies. This study offers valuable insights into the challenges and opportunities of GenAI integration in healthcare, providing a roadmap for institutions aiming to leverage GenAI for improved quality of care and operational efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12793v1-abstract-full').style.display = 'none'; document.getElementById('2410.12793v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.05207">arXiv:2409.05207</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.05207">pdf</a>, <a href="https://arxiv.org/format/2409.05207">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Low Latency Transformer Inference on FPGAs for Physics Applications with hls4ml </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhixing Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+D">Dennis Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yihui Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Khoda%2C+E+E">Elham E Khoda</a>, <a href="/search/cs?searchtype=author&amp;query=Hauck%2C+S">Scott Hauck</a>, <a href="/search/cs?searchtype=author&amp;query=Hsu%2C+S">Shih-Chieh Hsu</a>, <a href="/search/cs?searchtype=author&amp;query=Govorkova%2C+E">Ekaterina Govorkova</a>, <a href="/search/cs?searchtype=author&amp;query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&amp;query=Loncar%2C+V">Vladimir Loncar</a>, <a href="/search/cs?searchtype=author&amp;query=Moreno%2C+E+A">Eric A. Moreno</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.05207v1-abstract-short" style="display: inline;"> This study presents an efficient implementation of transformer architectures in Field-Programmable Gate Arrays(FPGAs) using hls4ml. We demonstrate the strategy for implementing the multi-head attention, softmax, and normalization layer and evaluate three distinct models. Their deployment on VU13P FPGA chip achieved latency less than 2us, demonstrating the potential for real-time applications. 

4. arXiv:2409.05207 [pdf, other]  (cs.LG)
   Low Latency Transformer Inference on FPGAs for Physics Applications with hls4ml
   Authors: Zhixing Jiang, Dennis Yin, Yihui Chen, Elham E Khoda, Scott Hauck, Shih-Chieh Hsu, Ekaterina Govorkova, Philip Harris, Vladimir Loncar, Eric A. Moreno
   Abstract: This study presents an efficient implementation of transformer architectures on Field-Programmable Gate Arrays (FPGAs) using hls4ml. We demonstrate the strategy for implementing the multi-head attention, softmax, and normalization layers and evaluate three distinct models. Their deployment on a VU13P FPGA chip achieved a latency of less than 2 µs, demonstrating the potential for real-time applications. hls4ml's compatibility with any TensorFlow-built transformer model further enhances the scalability and applicability of this work. Index Terms: FPGAs, machine learning, transformers, high energy physics, LIGO
   Submitted 8 September, 2024; originally announced September 2024.
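
This entry and entry 8 below both build transformer layers on top of hls4ml. As a sketch of the generic hls4ml conversion flow they extend (illustrative only: shown for a small dense network, since the transformer-layer support is the papers' contribution and may not be present in a given hls4ml release; the exact FPGA part string is a guess for a VU13P-class device):

```python
# Illustrative only: the generic Keras -> hls4ml -> HLS-project flow.
import numpy as np
import tensorflow as tf
import hls4ml

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(16,)),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(5, activation="softmax"),
])

config = hls4ml.utils.config_from_keras_model(model, granularity="name")
config["Model"]["Precision"] = "ap_fixed<16,6>"   # fixed-point precision budget

hls_model = hls4ml.converters.convert_from_keras_model(
    model,
    hls_config=config,
    output_dir="hls_prj",
    part="xcvu13p-flga2577-2-e",   # assumed VU13P-class part
)
hls_model.compile()                # bit-accurate C simulation of the firmware
print(hls_model.predict(np.random.rand(1, 16).astype(np.float32)))
```

Latency and resource figures like those quoted in the abstracts come from the subsequent HLS synthesis step, not from this software emulation.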

5. arXiv:2407.19048 [pdf, other]  (gr-qc, astro-ph.IM, cs.LG)
   Rapid Likelihood Free Inference of Compact Binary Coalescences using Accelerated Hardware
   Authors: Deep Chatterjee, Ethan Marx, William Benoit, Ravi Kumar, Malina Desai, Ekaterina Govorkova, Alec Gunny, Eric Moreno, Rafia Omer, Ryan Raikman, Muhammed Saleem, Shrey Aggarwal, Michael W. Coughlin, Philip Harris, Erik Katsavounidis
   Abstract: We report a gravitational-wave parameter estimation algorithm, AMPLFI, based on likelihood-free inference using normalizing flows. The focus of AMPLFI is to perform real-time parameter estimation for candidates detected by the machine-learning-based compact binary coalescence search Aframe. We present details of our algorithm and optimizations related to data loading and pre-processing on accelerated hardware. We train our model using binary black-hole (BBH) simulations on real LIGO-Virgo detector noise. Our model has $\sim 6$ million trainable parameters with training times $\lesssim 24$ hours. Based on online deployment on a mock data stream of LIGO-Virgo data, Aframe + AMPLFI is able to pick up BBH candidates and infer parameters for real-time alerts from data acquisition with a net latency of $\sim 6$ s.
   Submitted 26 July, 2024; originally announced July 2024.
   Comments: Submitted to MLST

6. arXiv:2406.19522 [pdf, other]  (cs.LG)
   Reliable edge machine learning hardware for scientific applications
   Authors: Tommaso Baldi, Javier Campos, Ben Hawks, Jennifer Ngadiuba, Nhan Tran, Daniel Diaz, Javier Duarte, Ryan Kastner, Andres Meza, Melissa Quinnan, Olivia Weng, Caleb Geniesse, Amir Gholami, Michael W. Mahoney, Vladimir Loncar, Philip Harris, Joshua Agar, Shuyu Qin
   Abstract: Extreme data rate scientific experiments create massive amounts of data that require efficient ML edge processing. This leads to unique validation challenges for VLSI implementations of ML algorithms: enabling bit-accurate functional simulations for performance validation in experimental software frameworks, verifying those ML models are robust under extreme quantization and pruning, and enabling ultra-fine-grained model inspection for efficient fault tolerance. We discuss approaches to developing and validating reliable algorithms at the scientific edge under such strict latency, resource, power, and area requirements in extreme experimental environments. We study metrics for developing robust algorithms, present preliminary results and mitigation strategies, and conclude with an outlook on these and future directions of research towards the longer-term goal of developing autonomous scientific experimentation methods for accelerated scientific discovery.
   Submitted 27 June, 2024; originally announced June 2024.
   Comments: IEEE VLSI Test Symposium 2024 (VTS)
   Report number: FERMILAB-CONF-24-0116-CSAID

7. arXiv:2403.07066 [pdf, other]  (hep-ph, cs.LG, hep-ex)
   Re-Simulation-based Self-Supervised Learning for Pre-Training Foundation Models
   Authors: Philip Harris, Michael Kagan, Jeffrey Krupa, Benedikt Maier, Nathaniel Woodward
   Abstract: Self-Supervised Learning (SSL) is at the core of training modern large machine learning models, providing a scheme for learning powerful representations that can be used in a variety of downstream tasks. However, SSL strategies must be adapted to the type of training data and downstream tasks required. We propose RS3L, a novel simulation-based SSL strategy that employs a method of re-simulation to drive data augmentation for contrastive learning. By intervening in the middle of the simulation process and re-running simulation components downstream of the intervention, we generate multiple realizations of an event, thus producing a set of augmentations covering all physics-driven variations available in the simulator. Using experiments from high-energy physics, we explore how this strategy may enable the development of a foundation model; we show how RS3L pre-training enables powerful performance in downstream tasks such as discrimination of a variety of objects and uncertainty mitigation. In addition to our results, we make the RS3L dataset publicly available for further studies on how to improve SSL strategies.
   Submitted 11 March, 2024; originally announced March 2024.
   Comments: 24 pages, 9 figures
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.01047v1-abstract-full').style.display = 'none'; document.getElementById('2402.01047v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 2 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine Learning and the Physical Sciences Workshop, NeurIPS 2023 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.09949">arXiv:2401.09949</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.09949">pdf</a>, <a href="https://arxiv.org/format/2401.09949">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> </div> <p class="title is-5 mathjax"> SymbolNet: Neural Symbolic Regression with Adaptive Dynamic Pruning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tsoi%2C+H+F">Ho Fung Tsoi</a>, <a href="/search/cs?searchtype=author&amp;query=Loncar%2C+V">Vladimir Loncar</a>, <a href="/search/cs?searchtype=author&amp;query=Dasu%2C+S">Sridhara Dasu</a>, <a href="/search/cs?searchtype=author&amp;query=Harris%2C+P">Philip Harris</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.09949v2-abstract-short" style="display: inline;"> Contrary to genetic programming, the neural network approach to symbolic regression can efficiently handle high-dimensional inputs and leverage gradient methods for faster equation searching. Common ways of constraining expression complexity often involve multistage pruning with fine-tuning, which can result in significant performance loss. In this work, we propose $\tt{SymbolNet}$, a neural netwo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.09949v2-abstract-full').style.display = 'inline'; document.getElementById('2401.09949v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.09949v2-abstract-full" style="display: none;"> Contrary to genetic programming, the neural network approach to symbolic regression can efficiently handle high-dimensional inputs and leverage gradient methods for faster equation searching. Common ways of constraining expression complexity often involve multistage pruning with fine-tuning, which can result in significant performance loss. 

9. arXiv:2401.09949 [pdf, other]  (cs.LG, hep-ex, physics.ins-det)
   SymbolNet: Neural Symbolic Regression with Adaptive Dynamic Pruning
   Authors: Ho Fung Tsoi, Vladimir Loncar, Sridhara Dasu, Philip Harris
   Abstract: Contrary to genetic programming, the neural network approach to symbolic regression can efficiently handle high-dimensional inputs and leverage gradient methods for faster equation searching. Common ways of constraining expression complexity often involve multistage pruning with fine-tuning, which can result in significant performance loss. In this work, we propose $\tt{SymbolNet}$, a neural network approach to symbolic regression in a novel framework that allows dynamic pruning of model weights, input features, and mathematical operators in a single training process, where both training loss and expression complexity are optimized simultaneously. We introduce a sparsity regularization term for each pruning type, which can adaptively adjust its strength, leading to convergence at a target sparsity ratio. Unlike most existing symbolic regression methods that struggle with datasets containing more than $\mathcal{O}(10)$ inputs, we demonstrate the effectiveness of our model on the LHC jet tagging task (16 inputs), MNIST (784 inputs), and SVHN (3072 inputs). Our approach enables symbolic regression to achieve fast inference with nanosecond-scale latency on FPGAs for high-dimensional datasets in environments with stringent computational resource constraints, such as the high-energy physics experiments at the LHC.
   Submitted 13 August, 2024; v1 submitted 18 January, 2024; originally announced January 2024.
   Comments: 24 pages. Minor fixes and formatting, under review
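
The abstract above mentions a sparsity regularization term whose strength adapts so that training converges to a target sparsity ratio. The following is a loose, illustrative PyTorch sketch of that single idea (not the SymbolNet implementation; the update rule and constants are invented for demonstration):

```python
# Illustrative only: an L1 penalty whose strength is adjusted toward a target sparsity.
import torch

def adaptive_sparsity_penalty(weights, strength, target_sparsity,
                              threshold=1e-3, rate=0.01):
    """Return (penalty, new_strength): strength grows while the model is less
    sparse than the target and shrinks once the target is exceeded."""
    current = (weights.abs() < threshold).float().mean().item()
    new_strength = strength * (1.0 + rate * (target_sparsity - current))
    return new_strength * weights.abs().sum(), new_strength

torch.manual_seed(0)
w = torch.randn(64, requires_grad=True)
opt = torch.optim.Adam([w], lr=1e-2)
strength = 1e-3
for step in range(200):
    task_loss = (w.sum() - 1.0) ** 2               # stand-in for the actual fit loss
    penalty, strength = adaptive_sparsity_penalty(w, strength, target_sparsity=0.8)
    loss = task_loss + penalty
    opt.zero_grad(); loss.backward(); opt.step()
print("near-zero weight fraction:", (w.abs() < 1e-3).float().mean().item())
```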

10. arXiv:2312.07615 [pdf, other]  (cs.LG, astro-ph.IM)
    Optimizing Likelihood-free Inference using Self-supervised Neural Symmetry Embeddings
    Authors: Deep Chatterjee, Philip C. Harris, Maanas Goel, Malina Desai, Michael W. Coughlin, Erik Katsavounidis
    Abstract: Likelihood-free inference is quickly emerging as a powerful tool to perform fast and effective parameter estimation. We demonstrate a technique for optimizing likelihood-free inference to make it even faster by marginalizing symmetries in a physical problem. In this approach, physical symmetries, for example time-translation, are learned using joint embedding via self-supervised learning with symmetry data augmentations. Subsequently, parameter inference is performed using a normalizing flow where the embedding network is used to summarize the data before conditioning on the parameters. We present this approach on two simple physical problems and show faster convergence with fewer parameters compared to a normalizing flow that does not use a pre-trained symmetry-informed representation.
    Submitted 11 December, 2023; originally announced December 2023.
    Comments: Accepted for Machine Learning and the Physical Sciences Workshop (submission 69) at NeurIPS 2023; for code, see https://github.com/ML4GW/summer-projects-2023/blob/neurips-2023/symmetry-informed-flows/README.md
arXiv:2309.12265  [pdf, ps, other]  math.CO  cs.DM  cs.GT  doi: 10.46298/dmtcs.13113
Cost-sharing in Parking Games
Authors: Jennifer Elder, Pamela E. Harris, Jan Kretschmann, J. Carlos Martínez Mori
Abstract: In this paper, we study the total displacement statistic of parking functions from the perspective of cooperative game theory. We introduce parking games, which are coalitional cost-sharing games in characteristic function form derived from the total displacement statistic. We show that parking games are supermodular cost-sharing games, indicating that cooperation is difficult (i.e., their core is empty). Next, we study their Shapley value, which formalizes a notion of "fair" cost-sharing and amounts to charging each car for its expected marginal displacement under a random arrival order. Our main contribution is a polynomial-time algorithm to compute the Shapley value of parking games, in contrast with known hardness results on computing the Shapley value of arbitrary games. The algorithm leverages the permutation-invariance of total displacement, combinatorial enumeration, and dynamic programming. We conclude with open questions around an alternative solution concept for supermodular cost-sharing games and connections to other areas in combinatorics.
Submitted 16 September, 2024; v1 submitted 21 September, 2023; originally announced September 2023.
Comments: 16 pages
MSC Class: 05A05; 91A12; 91A46
Journal ref: Discrete Mathematics & Theoretical Computer Science, vol. 26:3, Combinatorics (November 4, 2024) dmtcs:13113
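Illustration only: a brute-force Shapley computation for a tiny parking game (the paper's polynomial-time algorithm is not reproduced here). Taking the cost of a coalition to be the total displacement its cars incur when parking on their own is this editor's reading of the abstract.

from itertools import permutations

def total_displacement(prefs, coalition):
    # Cars park at the first free spot at or beyond their preference;
    # total displacement does not depend on the arrival order within the coalition.
    occupied, disp = set(), 0
    for car in sorted(coalition):
        spot = prefs[car]
        while spot in occupied:
            spot += 1
        occupied.add(spot)
        disp += spot - prefs[car]
    return disp

def shapley(prefs):
    # Shapley value: expected marginal cost of each car over a uniformly random arrival order.
    cars = list(prefs)
    phi = {c: 0.0 for c in cars}
    perms = list(permutations(cars))
    for order in perms:
        seen = set()
        for car in order:
            before = total_displacement(prefs, seen)
            seen.add(car)
            phi[car] += total_displacement(prefs, seen) - before
    return {c: v / len(perms) for c, v in phi.items()}

prefs = {1: 1, 2: 1, 3: 2}        # a parking function on 3 spots (toy example)
print(shapley(prefs))             # the values sum to the grand coalition's total displacement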
arXiv:2306.12656  [pdf, other]  cs.CL  cs.AI  doi: 10.1007/s41666-023-00155-0
Identifying and Extracting Rare Disease Phenotypes with Large Language Models
Authors: Cathy Shyr, Yan Hu, Paul A. Harris, Hua Xu
Abstract: Rare diseases (RDs) are collectively common and affect 300 million people worldwide. Accurate phenotyping is critical for informing diagnosis and treatment, but RD phenotypes are often embedded in unstructured text and time-consuming to extract manually. While natural language processing (NLP) models can perform named entity recognition (NER) to automate extraction, a major bottleneck is the development of a large, annotated corpus for model training. Recently, prompt learning emerged as an NLP paradigm that can lead to more generalizable results without any (zero-shot) or few labeled samples (few-shot). Despite growing interest in ChatGPT, a revolutionary large language model capable of following complex human prompts and generating high-quality responses, none have studied its NER performance for RDs in the zero- and few-shot settings. To this end, we engineered novel prompts aimed at extracting RD phenotypes and, to the best of our knowledge, are the first to establish a benchmark for evaluating ChatGPT's performance in these settings. We compared its performance to the traditional fine-tuning approach and conducted an in-depth error analysis. Overall, fine-tuning BioClinicalBERT resulted in higher performance (F1 of 0.689) than ChatGPT (F1 of 0.472 and 0.591 in the zero- and few-shot settings, respectively). Despite this, ChatGPT achieved similar or higher accuracy for certain entities (i.e., rare diseases and signs) in the one-shot setting (F1 of 0.776 and 0.725). This suggests that with appropriate prompt engineering, ChatGPT has the potential to match or outperform fine-tuned language models for certain entity types with just one labeled sample. While the proliferation of large language models may provide opportunities for supporting RD diagnosis and treatment, researchers and clinicians should critically evaluate model outputs and be well-informed of their limitations.
Submitted 21 June, 2023; originally announced June 2023.
Journal ref: J Healthc Inform Res 8, 438-461 (2024)
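Illustration of what a one-shot extraction prompt of this kind might look like; the entity types, example sentence, and JSON format below are assumptions for demonstration, not the authors' engineered prompts.

# Build a one-shot NER prompt for rare-disease phenotype extraction (illustrative only).
example = (
    "Sentence: The patient was diagnosed with cystic fibrosis and presents with chronic cough.\n"
    'Entities: [{"text": "cystic fibrosis", "type": "RARE_DISEASE"}, '
    '{"text": "chronic cough", "type": "SIGN"}]'
)

def build_prompt(sentence: str) -> str:
    return (
        "Extract phenotype entities (types: RARE_DISEASE, DISEASE, SIGN, SYMPTOM) from the "
        'sentence and return them as a JSON list of {"text", "type"} objects.\n\n'
        f"{example}\n\n"
        f"Sentence: {sentence}\nEntities:"
    )

print(build_prompt("He has Marfan syndrome with joint hypermobility."))
# The resulting string would be sent to a chat LLM; the JSON reply is then parsed
# and scored against gold annotations (e.g., entity-level F1).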
arXiv:2305.04099  [pdf, other]  cs.LG  hep-ex  physics.ins-det  doi: 10.1051/epjconf/202429509036
Symbolic Regression on FPGAs for Fast Machine Learning Inference
Authors: Ho Fung Tsoi, Adrian Alan Pol, Vladimir Loncar, Ekaterina Govorkova, Miles Cranmer, Sridhara Dasu, Peter Elmer, Philip Harris, Isobel Ojalvo, Maurizio Pierini
Abstract: The high-energy physics community is investigating the potential of deploying machine-learning-based solutions on Field-Programmable Gate Arrays (FPGAs) to enhance physics sensitivity while still meeting data processing time constraints. In this contribution, we introduce a novel end-to-end procedure that utilizes a machine learning technique called symbolic regression (SR), which searches the equation space to discover algebraic relations approximating a dataset. We use PySR (software that uncovers these expressions with an evolutionary algorithm) and extend the functionality of hls4ml (a package for machine learning inference on FPGAs) to support PySR-generated expressions for resource-constrained production environments. Deep learning models often optimize the top metric by pinning the network size because the vast hyperparameter space prevents an extensive search for neural architecture. Conversely, SR selects a set of models on the Pareto front, which allows for optimizing the performance-resource trade-off directly. By embedding symbolic forms, our implementation can dramatically reduce the computational resources needed to perform critical tasks. We validate our method on a physics benchmark: the multiclass classification of jets produced in simulated proton-proton collisions at the CERN Large Hadron Collider. We show that our approach can approximate a 3-layer neural network using an inference model that achieves up to a 13-fold decrease in execution time, down to 5 ns, while still preserving more than 90% approximation accuracy.
Submitted 17 January, 2024; v1 submitted 6 May, 2023; originally announced May 2023.
Comments: 9 pages. Accepted to 26th International Conference on Computing in High Energy & Nuclear Physics (CHEP 2023)
Journal ref: EPJ Web of Conferences 295, 09036 (2024)
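Illustration of the symbolic-regression step with PySR (toy data and hyperparameters, not the paper's configuration); the resulting closed-form expression is what the paper's hls4ml extension would then turn into FPGA firmware.

import numpy as np
from pysr import PySRRegressor

X = np.random.randn(1000, 3)                     # toy stand-ins for jet features
y = 2.5 * np.sin(X[:, 0]) + X[:, 1] * X[:, 2]    # toy target

model = PySRRegressor(
    niterations=40,
    binary_operators=["+", "-", "*"],
    unary_operators=["sin"],
    maxsize=20,                                  # cap expression complexity (proxy for FPGA resources)
)
model.fit(X, y)
print(model.sympy())                             # best expression on the accuracy/complexity Pareto front
# In the paper's workflow this expression is handed to hls4ml, which synthesizes it
# into fixed-latency FPGA logic instead of evaluating a neural network.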
arXiv:2304.02577  [pdf, other]  physics.med-ph  cs.LG  eess.SP
ECG Feature Importance Rankings: Cardiologists vs. Algorithms
Authors: Temesgen Mehari, Ashish Sundar, Alen Bosnjakovic, Peter Harris, Steven E. Williams, Axel Loewe, Olaf Doessel, Claudia Nagel, Nils Strodthoff, Philip J. Aston
Abstract: Feature importance methods promise to provide a ranking of features according to importance for a given classification task. A wide range of methods exist, but their rankings often disagree and they are inherently difficult to evaluate due to a lack of ground truth beyond synthetic datasets. In this work, we put feature importance methods to the test on real-world data in the domain of cardiology, where we try to distinguish three specific pathologies from healthy subjects based on ECG features, comparing to features used in cardiologists' decision rules as ground truth. Some methods generally performed well and others performed poorly, while some methods did well on some but not all of the problems considered.
Submitted 5 April, 2023; originally announced April 2023.
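Illustration (invented data and a hypothetical expert ranking) of this kind of comparison: compute an algorithmic feature-importance ranking, for example permutation importance, and measure its rank agreement with a reference ranking.

import numpy as np
from scipy.stats import spearmanr
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 4))                       # toy stand-ins for ECG-derived features
y = (X[:, 0] + 0.5 * X[:, 2] > 0).astype(int)       # toy "pathology vs. healthy" label

clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
imp = permutation_importance(clf, X, y, n_repeats=10, random_state=0).importances_mean

expert_importance = np.array([1.0, 0.1, 0.6, 0.1])  # hypothetical importances implied by decision rules
rho, _ = spearmanr(imp, expert_importance)          # rank agreement between algorithm and experts
print("algorithmic importances:", imp.round(3), "Spearman rho vs. experts:", round(rho, 3))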
arXiv:2301.04633  [pdf, ps, other]  hep-ex  cs.DC  physics.data-an  doi: 10.1007/s41781-023-00101-0
Accelerating Machine Learning Inference with GPUs in ProtoDUNE Data Processing
Authors: Tejin Cai, Kenneth Herner, Tingjun Yang, Michael Wang, Maria Acosta Flechas, Philip Harris, Burt Holzman, Kevin Pedro, Nhan Tran
Abstract: We study the performance of a cloud-based GPU-accelerated inference server to speed up event reconstruction in neutrino data batch jobs. Using detector data from the ProtoDUNE experiment and employing the standard DUNE grid job submission tools, we attempt to reprocess the data by running several thousand concurrent grid jobs, a rate we expect to be typical of current and future neutrino physics experiments. We process most of the dataset with the GPU version of our processing algorithm and the remainder with the CPU version for timing comparisons. We find that a 100-GPU cloud-based server is able to easily meet the processing demand, and that using the GPU version of the event processing algorithm is two times faster than processing these data with the CPU version when comparing to the newest CPUs in our sample. The amount of data transferred to the inference server during the GPU runs can overwhelm even the highest-bandwidth network switches, however, unless care is taken to observe network facility limits or otherwise distribute the jobs to multiple sites. We discuss the lessons learned from this processing campaign and several avenues for future improvements.
Submitted 27 October, 2023; v1 submitted 11 January, 2023; originally announced January 2023.
Comments: 13 pages, 9 figures, matches accepted version
Report number: FERMILAB-PUB-22-944-ND-PPD-SCD
Journal ref: Comput Softw Big Sci 7, 11 (2023)
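The abstract does not name the server software; purely as an illustration, a client-side request to an NVIDIA Triton-style inference server might look as follows (server URL, model name, and tensor names are placeholders, not the experiment's configuration).

import numpy as np
import tritonclient.grpc as grpcclient

client = grpcclient.InferenceServerClient(url="inference.example.org:8001")   # placeholder URL
batch = np.random.rand(4, 6000).astype(np.float32)   # toy stand-in for detector waveforms

inp = grpcclient.InferInput("waveforms", list(batch.shape), "FP32")           # placeholder tensor name
inp.set_data_from_numpy(batch)
out = grpcclient.InferRequestedOutput("scores")                               # placeholder output name
result = client.infer(model_name="protodune_model", inputs=[inp], outputs=[out])
print(result.as_numpy("scores").shape)
# Each CPU grid job only needs this thin client; the heavy neural-network
# inference runs remotely on the shared GPU server.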
arXiv:2212.05081  [pdf, other]  hep-ex  cs.LG  physics.comp-ph  doi: 10.1088/2632-2153/ad12e3
FAIR AI Models in High Energy Physics
Authors: Javier Duarte, Haoyang Li, Avik Roy, Ruike Zhu, E. A. Huerta, Daniel Diaz, Philip Harris, Raghav Kansal, Daniel S. Katz, Ishaan H. Kavoori, Volodymyr V. Kindratenko, Farouk Mokhtar, Mark S. Neubauer, Sang Eon Park, Melissa Quinnan, Roger Rusack, Zhizhen Zhao
Abstract: The findable, accessible, interoperable, and reusable (FAIR) data principles provide a framework for examining, evaluating, and improving how data is shared to facilitate scientific discovery. Generalizing these principles to research software and other digital products is an active area of research. Machine learning (ML) models -- algorithms that have been trained on data without being explicitly programmed -- and, more generally, artificial intelligence (AI) models are an important target for this because of the ever-increasing pace with which AI is transforming scientific domains, such as experimental high energy physics (HEP). In this paper, we propose a practical definition of FAIR principles for AI models in HEP and describe a template for the application of these principles. We demonstrate the template's use with an example AI model applied to HEP, in which a graph neural network is used to identify Higgs bosons decaying to two bottom quarks. We report on the robustness of this FAIR AI model, its portability across hardware architectures and software frameworks, and its interpretability.
Submitted 29 December, 2023; v1 submitted 9 December, 2022; originally announced December 2022.
Comments: 34 pages, 9 figures, 10 tables
Journal ref: Mach. Learn.: Sci. Technol. 4 (2023) 045062
arXiv:2210.08973  [pdf, ps, other]  cs.CY  cs.HC  cs.LG  hep-ex  doi: 10.1038/s41597-023-02298-6
FAIR for AI: An interdisciplinary and international community building perspective
Authors: E. A. Huerta, Ben Blaiszik, L. Catherine Brinson, Kristofer E. Bouchard, Daniel Diaz, Caterina Doglioni, Javier M. Duarte, Murali Emani, Ian Foster, Geoffrey Fox, Philip Harris, Lukas Heinrich, Shantenu Jha, Daniel S. Katz, Volodymyr Kindratenko, Christine R. Kirkpatrick, Kati Lassila-Perini, Ravi K. Madduri, Mark S. Neubauer, Fotis E. Psomopoulos, Avik Roy, Oliver Rübel, Zhizhen Zhao, Ruike Zhu
Abstract: A foundational set of findable, accessible, interoperable, and reusable (FAIR) principles were proposed in 2016 as prerequisites for proper data management and stewardship, with the goal of enabling the reusability of scholarly data. The principles were also meant to apply to other digital assets, at a high level, and over time the FAIR guiding principles have been re-interpreted or extended to include the software, tools, algorithms, and workflows that produce data. FAIR principles are now being adapted in the context of AI models and datasets. Here, we present the perspectives, vision, and experiences of researchers from different countries, disciplines, and backgrounds who are leading the definition and adoption of FAIR principles in their communities of practice, and discuss outcomes that may result from pursuing and incentivizing FAIR AI research. The material for this report builds on the FAIR for AI Workshop held at Argonne National Laboratory on June 7, 2022.
Submitted 1 August, 2023; v1 submitted 30 September, 2022; originally announced October 2022.
Comments: 10 pages, comments welcome!; v2: 12 pages, accepted to Scientific Data
ACM Class: I.2.0; E.0
Journal ref: Scientific Data 10, 487 (2023)
arXiv:2208.05484  [pdf, other]  hep-ph  cs.LG  hep-ex  doi: 10.1007/JHEP07(2023)108
Neural Embedding: Learning the Embedding of the Manifold of Physics Data
Authors: Sang Eon Park, Philip Harris, Bryan Ostdiek
Abstract: In this paper, we present a method of embedding physics data manifolds with metric structure into lower-dimensional spaces with simpler metrics, such as Euclidean and hyperbolic spaces. We then demonstrate that it can be a powerful step in the data analysis pipeline for many applications. Using progressively more realistic simulated collisions at the Large Hadron Collider, we show that this embedding approach learns the underlying latent structure. With the notion of volume in Euclidean spaces, we provide for the first time a viable solution to quantifying the true search capability of model-agnostic search algorithms in collider physics (i.e., anomaly detection). Finally, we discuss how the ideas presented in this paper can be employed to solve many practical challenges that require the extraction of physically meaningful representations from information in complex high-dimensional datasets.
Submitted 14 August, 2022; v1 submitted 10 August, 2022; originally announced August 2022.
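Schematic illustration of distance-preserving embedding (not the paper's architecture or loss): learn low-dimensional coordinates whose Euclidean distances approximate pairwise distances computed with the original metric; the data and the metric below are toy assumptions.

import torch

n, d_low = 50, 2
torch.manual_seed(0)
X = torch.randn(n, 10)                               # toy high-dimensional data
D = torch.cdist(X, X)                                # stand-in for the physics-motivated metric

Z = torch.randn(n, d_low, requires_grad=True)        # learnable low-dimensional embedding
opt = torch.optim.Adam([Z], lr=0.05)
for step in range(500):
    stress = ((torch.cdist(Z, Z) - D) ** 2).mean()   # match pairwise distances
    opt.zero_grad(); stress.backward(); opt.step()
# Volumes measured in the embedded Euclidean space can then be used, for example,
# to quantify how much of the space an anomaly-detection algorithm actually probes.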
arXiv:2207.09060  [pdf, other]  physics.ed-ph  cs.LG  hep-ex  physics.comp-ph
Data Science and Machine Learning in Education
Authors: Gabriele Benelli, Thomas Y. Chen, Javier Duarte, Matthew Feickert, Matthew Graham, Lindsey Gray, Dan Hackett, Phil Harris, Shih-Chieh Hsu, Gregor Kasieczka, Elham E. Khoda, Matthias Komm, Mia Liu, Mark S. Neubauer, Scarlet Norberg, Alexx Perloff, Marcel Rieger, Claire Savard, Kazuhiro Terao, Savannah Thais, Avik Roy, Jean-Roch Vlimant, Grigorios Chachamis
Abstract: The growing role of data science (DS) and machine learning (ML) in high-energy physics (HEP) is well established and pertinent given the complex detectors, large data sets, and sophisticated analyses at the heart of HEP research. Moreover, exploiting symmetries inherent in physics data has inspired physics-informed ML as a vibrant sub-field of computer science research. HEP researchers benefit greatly from widely available materials for use in education, training, and workforce development. They are also contributing to these materials and providing software to DS/ML-related fields. Increasingly, physics departments are offering courses at the intersection of DS, ML, and physics, often using curricula developed by HEP researchers and involving open software and data used in HEP. In this white paper, we explore synergies between HEP research and DS/ML education, discuss opportunities and challenges at this intersection, and propose community activities that will be mutually beneficial.
Submitted 19 July, 2022; originally announced July 2022.
Comments: Contribution to Snowmass 2021
arXiv:2207.00559  [pdf, other]  cs.LG  hep-ex  physics.ins-det  stat.ML
Ultra-low latency recurrent neural network inference on FPGAs for physics applications with hls4ml
Authors: Elham E Khoda, Dylan Rankin, Rafael Teixeira de Lima, Philip Harris, Scott Hauck, Shih-Chieh Hsu, Michael Kagan, Vladimir Loncar, Chaitanya Paikara, Richa Rao, Sioni Summers, Caterina Vernieri, Aaron Wang
Abstract: Recurrent neural networks have been shown to be effective architectures for many tasks in high energy physics, and thus have been widely adopted. Their use in low-latency environments has, however, been limited as a result of the difficulties of implementing recurrent architectures on field-programmable gate arrays (FPGAs). In this paper we present an implementation of two types of recurrent neural network layers -- long short-term memory and gated recurrent unit -- within the hls4ml framework. We demonstrate that our implementation is capable of producing effective designs for both small and large models, and can be customized to meet specific design requirements for inference latencies and FPGA resources. We show the performance and synthesized designs for multiple neural networks, many of which are trained specifically for jet identification tasks at the CERN Large Hadron Collider.
Submitted 1 July, 2022; originally announced July 2022.
Comments: 12 pages, 6 figures, 5 tables
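Illustration of converting a small Keras LSTM model with hls4ml, in the spirit of the workflow described above; the layer sizes, FPGA part number, and output directory are placeholders, not the paper's configurations.

import numpy as np
import hls4ml
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

model = Sequential([
    LSTM(16, input_shape=(20, 6)),     # 20 time steps, 6 features per step (toy sizes)
    Dense(5, activation="softmax"),    # e.g. 5 jet classes
])
model.compile(loss="categorical_crossentropy")

config = hls4ml.utils.config_from_keras_model(model, granularity="model")
hls_model = hls4ml.converters.convert_from_keras_model(
    model, hls_config=config, output_dir="hls_lstm_prj", part="xcvu9p-flga2104-2-e"
)
hls_model.compile()                    # builds a bit-accurate C simulation of the firmware
y_hls = hls_model.predict(np.random.rand(1, 20, 6).astype(np.float32))
# Latency and resource usage can then be traded off through the precision and
# parallelism settings exposed in the hls4ml configuration.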
arXiv:2205.07690  [pdf, other]  cs.CV  cs.AR  cs.LG  physics.ins-det  stat.ML
Real-time semantic segmentation on FPGAs for autonomous vehicles with hls4ml
Authors: Nicolò Ghielmetti, Vladimir Loncar, Maurizio Pierini, Marcel Roed, Sioni Summers, Thea Aarrestad, Christoffer Petersson, Hampus Linander, Jennifer Ngadiuba, Kelvin Lin, Philip Harris
Abstract: In this paper, we investigate how field-programmable gate arrays can serve as hardware accelerators for real-time semantic segmentation tasks relevant for autonomous driving. Considering compressed versions of the ENet convolutional neural network architecture, we demonstrate a fully-on-chip deployment with a latency of 4.9 ms per image, using less than 30% of the available resources on a Xilinx ZCU102 evaluation board. The latency is reduced to 3 ms per image when increasing the batch size to ten, corresponding to the use case where the autonomous vehicle receives inputs from multiple cameras simultaneously. We show, through aggressive filter reduction and heterogeneous quantization-aware training, and an optimized implementation of convolutional layers, that the power consumption and resource utilization can be significantly reduced while maintaining accuracy on the Cityscapes dataset.
Submitted 16 May, 2022; originally announced May 2022.
Comments: 11 pages, 6 tables, 5 figures
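The abstract does not name the quantization library; a sketch of heterogeneous quantization-aware training with QKeras, the usual companion tool of hls4ml, might look as follows (bit widths and layer sizes are illustrative, not the paper's).

from tensorflow.keras.models import Sequential
from qkeras import QConv2D, QActivation, quantized_bits, quantized_relu

model = Sequential([
    # First convolution kept at 8 bits, a later one squeezed to 4 bits ("heterogeneous").
    QConv2D(8, (3, 3), input_shape=(64, 64, 3),
            kernel_quantizer=quantized_bits(8, 0, alpha=1),
            bias_quantizer=quantized_bits(8, 0, alpha=1)),
    QActivation(quantized_relu(8)),
    QConv2D(4, (3, 3),
            kernel_quantizer=quantized_bits(4, 0, alpha=1),
            bias_quantizer=quantized_bits(4, 0, alpha=1)),
    QActivation(quantized_relu(4)),
])
model.compile(optimizer="adam", loss="categorical_crossentropy")
# Training proceeds as with ordinary Keras; the quantizers constrain weights and
# activations during training so accuracy survives at low precision, and the
# trained model can then be converted to FPGA firmware with hls4ml.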
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 6 tables, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.16255">arXiv:2203.16255</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.16255">pdf</a>, <a href="https://arxiv.org/format/2203.16255">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="General Relativity and Quantum Cosmology">gr-qc</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> </div> <p class="title is-5 mathjax"> Physics Community Needs, Tools, and Resources for Machine Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&amp;query=Katsavounidis%2C+E">Erik Katsavounidis</a>, <a href="/search/cs?searchtype=author&amp;query=McCormack%2C+W+P">William Patrick McCormack</a>, <a href="/search/cs?searchtype=author&amp;query=Rankin%2C+D">Dylan Rankin</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+Y">Yongbin Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Gandrakota%2C+A">Abhijith Gandrakota</a>, <a href="/search/cs?searchtype=author&amp;query=Herwig%2C+C">Christian Herwig</a>, <a href="/search/cs?searchtype=author&amp;query=Holzman%2C+B">Burt Holzman</a>, <a href="/search/cs?searchtype=author&amp;query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N">Nhan Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+T">Tingjun Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Ngadiuba%2C+J">Jennifer Ngadiuba</a>, <a href="/search/cs?searchtype=author&amp;query=Coughlin%2C+M">Michael Coughlin</a>, <a href="/search/cs?searchtype=author&amp;query=Hauck%2C+S">Scott Hauck</a>, <a href="/search/cs?searchtype=author&amp;query=Hsu%2C+S">Shih-Chieh Hsu</a>, <a href="/search/cs?searchtype=author&amp;query=Khoda%2C+E+E">Elham E Khoda</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+D">Deming Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Neubauer%2C+M">Mark Neubauer</a>, <a href="/search/cs?searchtype=author&amp;query=Duarte%2C+J">Javier Duarte</a>, <a href="/search/cs?searchtype=author&amp;query=Karagiorgi%2C+G">Georgia Karagiorgi</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+M">Mia Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.16255v1-abstract-short" style="display: inline;"> Machine learning (ML) is becoming an increasingly important component of cutting-edge physics research, but its computational requirements present significant challenges. 
arXiv:2203.16255  [cs.LG; gr-qc; hep-ex; physics.ins-det]
Physics Community Needs, Tools, and Resources for Machine Learning
Authors: Philip Harris, Erik Katsavounidis, William Patrick McCormack, Dylan Rankin, Yongbin Feng, Abhijith Gandrakota, Christian Herwig, Burt Holzman, Kevin Pedro, Nhan Tran, Tingjun Yang, Jennifer Ngadiuba, Michael Coughlin, Scott Hauck, Shih-Chieh Hsu, Elham E Khoda, Deming Chen, Mark Neubauer, Javier Duarte, Georgia Karagiorgi, Mia Liu
Abstract: Machine learning (ML) is becoming an increasingly important component of cutting-edge physics research, but its computational requirements present significant challenges. In this white paper, we discuss the needs of the physics community regarding ML across latency and throughput regimes, the tools and resources that offer the possibility of addressing these needs, and how these can be best utilized and accessed in the coming years.
Submitted 30 March, 2022; originally announced March 2022.
Comments: Contribution to Snowmass 2021, 33 pages, 5 figures
arXiv:2110.13041  [cs.LG; cs.AR; physics.data-an; physics.ins-det]
DOI: 10.3389/fdata.2022.787421
Applications and Techniques for Fast Machine Learning in Science
Authors: Allison McCarn Deiana, Nhan Tran, Joshua Agar, Michaela Blott, Giuseppe Di Guglielmo, Javier Duarte, Philip Harris, Scott Hauck, Mia Liu, Mark S. Neubauer, Jennifer Ngadiuba, Seda Ogrenci-Memik, Maurizio Pierini, Thea Aarrestad, Steffen Bahr, Jurgen Becker, Anne-Sophie Berthold, Richard J. Bonventre, Tomas E. Muller Bravo, Markus Diefenthaler, Zhen Dong, Nick Fritzsche, Amir Gholami, Ekaterina Govorkova, Kyle J Hazelwood, et al. (62 additional authors not shown)
Abstract: In this community review report, we discuss applications and techniques for fast machine learning (ML) in science -- the concept of integrating power ML methods into the real-time experimental data processing loop to accelerate scientific discovery. The material for the report builds on two workshops held by the Fast ML for Science community and covers three main areas: applications for fast ML across a number of scientific domains; techniques for training and implementing performant and resource-efficient ML algorithms; and computing architectures, platforms, and technologies for deploying these algorithms. We also present overlapping challenges across the multiple scientific domains where common solutions can be found. This community report is intended to give plenty of examples and inspiration for scientific discovery through integrated and accelerated ML solutions. This is followed by a high-level overview and organization of technical advances, including an abundance of pointers to source material, which can enable these breakthroughs.
Submitted 25 October, 2021; originally announced October 2021.
Comments: 66 pages, 13 figures, 5 tables
Report number: FERMILAB-PUB-21-502-AD-E-SCD
Journal ref: Front. Big Data 5, 787421 (2022)
arXiv:2108.02214  [hep-ex; cs.AI; cs.DB; hep-ph]
DOI: 10.1038/s41597-021-01109-0
A FAIR and AI-ready Higgs boson decay dataset
Authors: Yifan Chen, E. A. Huerta, Javier Duarte, Philip Harris, Daniel S. Katz, Mark S. Neubauer, Daniel Diaz, Farouk Mokhtar, Raghav Kansal, Sang Eon Park, Volodymyr V. Kindratenko, Zhizhen Zhao, Roger Rusack
Abstract: To enable the reusability of massive scientific datasets by humans and machines, researchers aim to adhere to the principles of findability, accessibility, interoperability, and reusability (FAIR) for data and artificial intelligence (AI) models. This article provides a domain-agnostic, step-by-step assessment guide to evaluate whether or not a given dataset meets these principles. We demonstrate how to use this guide to evaluate the FAIRness of an open simulated dataset produced by the CMS Collaboration at the CERN Large Hadron Collider. This dataset consists of Higgs boson decays and quark and gluon background, and is available through the CERN Open Data Portal. We use additional available tools to assess the FAIRness of this dataset, and incorporate feedback from members of the FAIR community to validate our results. This article is accompanied by a Jupyter notebook to visualize and explore this dataset. This study marks the first in a planned series of articles that will guide scientists in the creation of FAIR AI models and datasets in high energy particle physics.
Submitted 16 February, 2022; v1 submitted 4 August, 2021; originally announced August 2021.
Comments: 13 pages, 3 figures. v2: Accepted to Nature Scientific Data. Learn about the FAIR4HEP project at https://fair4hep.github.io. See our invited Behind the Paper Blog in Springer Nature Research Data Community at https://go.nature.com/3oMVYxo
ACM Class: I.2; J.2
Journal ref: Scientific Data volume 9, Article number: 31 (2022)
arXiv:2108.01995  [eess.SP; cs.LG]
DOI: 10.1098/rsta.2020.0262
Robustness of convolutional neural networks to physiological ECG noise
Authors: J. Venton, P. M. Harris, A. Sundar, N. A. S. Smith, P. J. Aston
Abstract: The electrocardiogram (ECG) is one of the most widespread diagnostic tools in healthcare and supports the diagnosis of cardiovascular disorders. Deep learning methods are a successful and popular technique to detect indications of disorders from an ECG signal. However, there are open questions around the robustness of these methods to various factors, including physiological ECG noise. In this study we generate clean and noisy versions of an ECG dataset before applying Symmetric Projection Attractor Reconstruction (SPAR) and scalogram image transformations. A pretrained convolutional neural network is trained using transfer learning to classify these image transforms. For the clean ECG dataset, F1 scores for SPAR attractor and scalogram transforms were 0.70 and 0.79, respectively, and the scores decreased by less than 0.05 for the noisy ECG datasets. Notably, when the network trained on clean data was used to classify the noisy datasets, performance decreases of up to 0.18 in F1 scores were seen. However, when the network trained on the noisy data was used to classify the clean dataset, the performance decrease was less than 0.05. We conclude that physiological ECG noise impacts classification using deep learning methods and careful consideration should be given to the inclusion of noisy ECG signals in the training data when developing supervised networks for ECG classification.
Submitted 2 August, 2021; originally announced August 2021.
Comments: 16 pages, 7 figures
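The transfer-learning setup described here follows a generic pattern: a CNN pretrained on natural images is frozen and a new classification head is trained on the SPAR or scalogram images. The sketch below illustrates that pattern in Keras; the backbone, input size, and class count are assumptions rather than the paper's configuration.

    # Transfer-learning sketch for classifying image-transformed ECG signals.
    # Backbone, input size, and class count are assumptions, not the paper's setup.
    import tensorflow as tf

    NUM_CLASSES = 2  # e.g. two rhythm classes (assumption)

    base = tf.keras.applications.MobileNetV2(
        input_shape=(224, 224, 3), include_top=False, weights="imagenet")
    base.trainable = False  # freeze the pretrained features; train only the new head

    model = tf.keras.Sequential([
        base,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(NUM_CLASSES, activation="softmax"),
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    # model.fit(scalogram_images, labels, validation_split=0.2) on the image transforms.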
arXiv:2105.01683  [physics.ins-det; cs.LG; hep-ex]
DOI: 10.1109/TNS.2021.3087100
A reconfigurable neural network ASIC for detector front-end data compression at the HL-LHC
Authors: Giuseppe Di Guglielmo, Farah Fahim, Christian Herwig, Manuel Blanco Valentin, Javier Duarte, Cristian Gingu, Philip Harris, James Hirschauer, Martin Kwok, Vladimir Loncar, Yingyi Luo, Llovizna Miranda, Jennifer Ngadiuba, Daniel Noonan, Seda Ogrenci-Memik, Maurizio Pierini, Sioni Summers, Nhan Tran
Abstract: Despite advances in the programmable logic capabilities of modern trigger systems, a significant bottleneck remains in the amount of data to be transported from the detector to off-detector logic where trigger decisions are made. We demonstrate that a neural network autoencoder model can be implemented in a radiation tolerant ASIC to perform lossy data compression alleviating the data transmission problem while preserving critical information of the detector energy profile. For our application, we consider the high-granularity calorimeter from the CMS experiment at the CERN Large Hadron Collider. The advantage of the machine learning approach is in the flexibility and configurability of the algorithm. By changing the neural network weights, a unique data compression algorithm can be deployed for each sensor in different detector regions, and changing detector or collider conditions. To meet area, performance, and power constraints, we perform a quantization-aware training to create an optimized neural network hardware implementation. The design is achieved through the use of high-level synthesis tools and the hls4ml framework, and was processed through synthesis and physical layout flows based on a LP CMOS 65 nm technology node. The flow anticipates 200 Mrad of ionizing radiation to select gates, and reports a total area of 3.6 mm^2 and consumes 95 mW of power. The simulated energy consumption per inference is 2.4 nJ. This is the first radiation tolerant on-detector ASIC implementation of a neural network that has been designed for particle physics applications.
Submitted 4 May, 2021; originally announced May 2021.
Comments: 9 pages, 8 figures, 3 tables
Report number: FERMILAB-PUB-21-217-CMS-E-SCD
Journal ref: IEEE Trans. Nucl. Sci. 68, 2179 (2021)
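The quantization-aware autoencoder training described in this abstract can be pictured as a quantized encoder (the part destined for the on-detector ASIC) paired with a floating-point decoder used only during training. The following is a minimal illustration under assumed layer sizes and bit widths, not the design reported in the paper.

    # Quantization-aware autoencoder sketch: a QKeras encoder and a float decoder
    # used only for training. Dimensions and bit widths are assumptions.
    import tensorflow as tf
    from qkeras import QDense, QActivation, quantized_bits, quantized_relu

    N_CELLS = 48    # inputs per sensor module (assumption)
    N_LATENT = 16   # compressed outputs sent off-detector (assumption)

    inputs = tf.keras.Input(shape=(N_CELLS,))
    z = QDense(N_LATENT,
               kernel_quantizer=quantized_bits(6, 0, alpha=1),
               bias_quantizer=quantized_bits(6, 0, alpha=1))(inputs)
    z = QActivation(quantized_relu(6))(z)
    reco = tf.keras.layers.Dense(N_CELLS, activation="relu")(z)

    autoencoder = tf.keras.Model(inputs, reco)
    encoder = tf.keras.Model(inputs, z)   # only the encoder would target the ASIC
    autoencoder.compile(optimizer="adam", loss="mse")
    # autoencoder.fit(x, x, ...); the trained encoder can then go through an HLS flow.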
arXiv:2103.05579  [cs.LG; cs.AR; physics.ins-det]
hls4ml: An Open-Source Codesign Workflow to Empower Scientific Low-Power Machine Learning Devices
Authors: Farah Fahim, Benjamin Hawks, Christian Herwig, James Hirschauer, Sergo Jindariani, Nhan Tran, Luca P. Carloni, Giuseppe Di Guglielmo, Philip Harris, Jeffrey Krupa, Dylan Rankin, Manuel Blanco Valentin, Josiah Hester, Yingyi Luo, John Mamish, Seda Orgrenci-Memik, Thea Aarrestad, Hamza Javed, Vladimir Loncar, Maurizio Pierini, Adrian Alan Pol, Sioni Summers, Javier Duarte, Scott Hauck, Shih-Chieh Hsu, et al. (5 additional authors not shown)
Abstract: Accessible machine learning algorithms, software, and diagnostic tools for energy-efficient devices and systems are extremely valuable across a broad range of application domains. In scientific domains, real-time near-sensor processing can drastically improve experimental design and accelerate scientific discoveries. To support domain scientists, we have developed hls4ml, an open-source software-hardware codesign workflow to interpret and translate machine learning algorithms for implementation with both FPGA and ASIC technologies. We expand on previous hls4ml work by extending capabilities and techniques towards low-power implementations and increased usability: new Python APIs, quantization-aware pruning, end-to-end FPGA workflows, long pipeline kernels for low power, and new device backends include an ASIC workflow. Taken together, these and continued efforts in hls4ml will arm a new generation of domain scientists with accessible, efficient, and powerful tools for machine-learning-accelerated discovery.
Submitted 23 March, 2021; v1 submitted 9 March, 2021; originally announced March 2021.
Comments: 10 pages, 8 figures, TinyML Research Symposium 2021
Report number: FERMILAB-CONF-21-080-SCD
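For reference, the basic hls4ml Python flow that this codesign workflow builds on looks roughly like the sketch below: configure a trained Keras model, convert it to an HLS project, and compare fixed-point predictions against the original. The placeholder model and FPGA part are illustrative, not values taken from the paper.

    # Sketch of the basic hls4ml Python flow: Keras model -> HLS project -> emulation.
    # The model and FPGA part below are placeholders.
    import numpy as np
    import tensorflow as tf
    import hls4ml

    # A small placeholder network standing in for a trained model.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation="relu", input_shape=(16,)),
        tf.keras.layers.Dense(5, activation="softmax"),
    ])

    config = hls4ml.utils.config_from_keras_model(model, granularity="name")
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir="hls4ml_prj",
        part="xcu250-figd2104-2L-e",   # example Xilinx part
    )
    hls_model.compile()                 # builds a C simulation of the firmware

    x = np.random.rand(10, 16).astype(np.float32)
    y_keras = model.predict(x)
    y_hls = hls_model.predict(x)        # bit-accurate emulation of the HLS design
    # hls_model.build(csim=False) would then run the full HLS synthesis.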
arXiv:2103.00560  [cs.CV; q-bio.QM]
DOI: 10.1093/icb/icab107
Perspectives on individual animal identification from biology and computer vision
Authors: Maxime Vidal, Nathan Wolf, Beth Rosenberg, Bradley P. Harris, Alexander Mathis
Abstract: Identifying individual animals is crucial for many biological investigations. In response to some of the limitations of current identification methods, new automated computer vision approaches have emerged with strong performance. Here, we review current advances of computer vision identification techniques to provide both computer scientists and biologists with an overview of the available tools and discuss their applications. We conclude by offering recommendations for starting an animal identification project, illustrate current limitations and propose how they might be addressed in the future.
Submitted 28 February, 2021; originally announced March 2021.
Comments: 12 pages, 1 figure, 2 boxes and 1 table
Journal ref: Integr Comp Biol. 2021 Oct 4;61(3):900-916
arXiv:2101.05108  [cs.LG; cs.CV; hep-ex; physics.ins-det; stat.ML]
DOI: 10.1088/2632-2153/ac0ea1
Fast convolutional neural networks on FPGAs with hls4ml
Authors: Thea Aarrestad, Vladimir Loncar, Nicolò Ghielmetti, Maurizio Pierini, Sioni Summers, Jennifer Ngadiuba, Christoffer Petersson, Hampus Linander, Yutaro Iiyama, Giuseppe Di Guglielmo, Javier Duarte, Philip Harris, Dylan Rankin, Sergo Jindariani, Kevin Pedro, Nhan Tran, Mia Liu, Edward Kreinar, Zhenbin Wu, Duc Hoang
Abstract: We introduce an automated tool for deploying ultra low-latency, low-power deep neural networks with convolutional layers on FPGAs. By extending the hls4ml library, we demonstrate an inference latency of $5\,\mu$s using convolutional architectures, targeting microsecond latency applications like those at the CERN Large Hadron Collider. Considering benchmark models trained on the Street View House Numbers Dataset, we demonstrate various methods for model compression in order to fit the computational constraints of a typical FPGA device used in trigger and data acquisition systems of particle detectors. In particular, we discuss pruning and quantization-aware training, and demonstrate how resource utilization can be significantly reduced with little to no loss in model accuracy. We show that the FPGA critical resource consumption can be reduced by 97% with zero loss in model accuracy, and by 99% when tolerating a 6% accuracy degradation.
Submitted 29 April, 2021; v1 submitted 13 January, 2021; originally announced January 2021.
Comments: 18 pages, 18 figures, 4 tables
Journal ref: Mach. Learn.: Sci. Technol. 2 045015 (2021)
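The pruning discussed in this abstract can be reproduced in spirit with the TensorFlow Model Optimization toolkit, which applies magnitude pruning on a schedule during training. The sketch below is illustrative; the toy model, sparsity target, and schedule are assumptions, not the values used in the paper.

    # Illustrative magnitude-pruning sketch with tensorflow_model_optimization.
    # Sparsity target and schedule are assumptions.
    import tensorflow as tf
    import tensorflow_model_optimization as tfmot

    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(16, 3, activation="relu", input_shape=(32, 32, 3)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(10, activation="softmax"),
    ])

    prune = tfmot.sparsity.keras.prune_low_magnitude
    schedule = tfmot.sparsity.keras.PolynomialDecay(
        initial_sparsity=0.0, final_sparsity=0.75,   # prune 75% of weights (assumption)
        begin_step=0, end_step=2000)
    pruned_model = prune(model, pruning_schedule=schedule)

    pruned_model.compile(optimizer="adam",
                         loss="sparse_categorical_crossentropy",
                         metrics=["accuracy"])
    callbacks = [tfmot.sparsity.keras.UpdatePruningStep()]
    # pruned_model.fit(x_train, y_train, epochs=..., callbacks=callbacks)

    # Remove the pruning wrappers before export (e.g. to hls4ml).
    final_model = tfmot.sparsity.keras.strip_pruning(pruned_model)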
arXiv:2012.01563  [physics.ins-det; cs.LG; hep-ex; physics.comp-ph]
Accelerated Charged Particle Tracking with Graph Neural Networks on FPGAs
Authors: Aneesh Heintz, Vesal Razavimaleki, Javier Duarte, Gage DeZoort, Isobel Ojalvo, Savannah Thais, Markus Atkinson, Mark Neubauer, Lindsey Gray, Sergo Jindariani, Nhan Tran, Philip Harris, Dylan Rankin, Thea Aarrestad, Vladimir Loncar, Maurizio Pierini, Sioni Summers, Jennifer Ngadiuba, Mia Liu, Edward Kreinar, Zhenbin Wu
Abstract: We develop and study FPGA implementations of algorithms for charged particle tracking based on graph neural networks. The two complementary FPGA designs are based on OpenCL, a framework for writing programs that execute across heterogeneous platforms, and hls4ml, a high-level-synthesis-based compiler for neural network to firmware conversion. We evaluate and compare the resource usage, latency, and tracking performance of our implementations based on a benchmark dataset. We find a considerable speedup over CPU-based execution is possible, potentially enabling such algorithms to be used effectively in future computing workflows and the FPGA-based Level-1 trigger at the CERN Large Hadron Collider.
Submitted 30 November, 2020; originally announced December 2020.
Comments: 8 pages, 4 figures, to appear in Third Workshop on Machine Learning and the Physical Sciences (NeurIPS 2020)
Report number: FERMILAB-CONF-20-622-CMS-SCD
arXiv:2010.08556  [physics.comp-ph; cs.DC; hep-ex; physics.data-an; physics.ins-det]
DOI: 10.1109/H2RC51942.2020.00010
FPGAs-as-a-Service Toolkit (FaaST)
Authors: Dylan Sheldon Rankin, Jeffrey Krupa, Philip Harris, Maria Acosta Flechas, Burt Holzman, Thomas Klijnsma, Kevin Pedro, Nhan Tran, Scott Hauck, Shih-Chieh Hsu, Matthew Trahms, Kelvin Lin, Yu Lou, Ta-Wei Ho, Javier Duarte, Mia Liu
Abstract: Computing needs for high energy physics are already intensive and are expected to increase drastically in the coming years. In this context, heterogeneous computing, specifically as-a-service computing, has the potential for significant gains over traditional computing models. Although previous studies and packages in the field of heterogeneous computing have focused on GPUs as accelerators, FPGAs are an extremely promising option as well. A series of workflows are developed to establish the performance capabilities of FPGAs as a service. Multiple different devices and a range of algorithms for use in high energy physics are studied. For a small, dense network, the throughput can be improved by an order of magnitude with respect to GPUs as a service. For large convolutional networks, the throughput is found to be comparable to GPUs as a service. This work represents the first open-source FPGAs-as-a-service toolkit.
Submitted 16 October, 2020; originally announced October 2020.
Comments: 10 pages, 7 figures, to appear in proceedings of the 2020 IEEE/ACM International Workshop on Heterogeneous High-performance Reconfigurable Computing
Report number: FERMILAB-CONF-20-426-SCD
Journal ref: 2020 IEEE/ACM International Workshop on Heterogeneous High-performance Reconfigurable Computing (H2RC), 2020, pp. 38-47
arXiv:2009.04509  [physics.comp-ph; cs.DC; hep-ex; physics.data-an]
DOI: 10.3389/fdata.2020.604083
GPU-accelerated machine learning inference as a service for computing in neutrino experiments
Authors: Michael Wang, Tingjun Yang, Maria Acosta Flechas, Philip Harris, Benjamin Hawks, Burt Holzman, Kyle Knoepfel, Jeffrey Krupa, Kevin Pedro, Nhan Tran
Abstract: Machine learning algorithms are becoming increasingly prevalent and performant in the reconstruction of events in accelerator-based neutrino experiments. These sophisticated algorithms can be computationally expensive. At the same time, the data volumes of such experiments are rapidly increasing. The demand to process billions of neutrino events with many machine learning algorithm inferences creates a computing challenge. We explore a computing model in which heterogeneous computing with GPU coprocessors is made available as a web service. The coprocessors can be efficiently and elastically deployed to provide the right amount of computing for a given processing task. With our approach, Services for Optimized Network Inference on Coprocessors (SONIC), we integrate GPU acceleration specifically for the ProtoDUNE-SP reconstruction chain without disrupting the native computing workflow. With our integrated framework, we accelerate the most time-consuming task, track and particle shower hit identification, by a factor of 17. This results in a factor of 2.7 reduction in the total processing time when compared with CPU-only production. For this particular task, only 1 GPU is required for every 68 CPU threads, providing a cost-effective solution.
Submitted 22 March, 2021; v1 submitted 9 September, 2020; originally announced September 2020.
Comments: 15 pages, 7 figures, 2 tables
Report number: FERMILAB-PUB-20-428-ND-SCD
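In the as-a-service pattern described here, clients inside the experiment's workflow send inference requests over the network to a GPU-backed server. A minimal Python sketch of such a client, using the NVIDIA Triton gRPC client library as one concrete choice, is shown below; the server address, model name, and tensor names and shapes are placeholders rather than the ProtoDUNE-SP configuration.

    # Minimal inference-as-a-service client sketch using the Triton gRPC client.
    # Server address, model name, and tensor names/shapes are placeholders.
    import numpy as np
    import tritonclient.grpc as grpcclient

    client = grpcclient.InferenceServerClient(url="localhost:8001")

    batch = np.random.rand(8, 256).astype(np.float32)   # placeholder input batch
    inp = grpcclient.InferInput("input__0", list(batch.shape), "FP32")
    inp.set_data_from_numpy(batch)
    out = grpcclient.InferRequestedOutput("output__0")

    result = client.infer(model_name="hit_classifier",   # placeholder model name
                          inputs=[inp], outputs=[out])
    scores = result.as_numpy("output__0")
    print(scores.shape)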

arXiv:2008.03601  [pdf, other]  physics.ins-det, cs.LG, hep-ex
DOI: 10.3389/fdata.2020.598927
Distance-Weighted Graph Neural Networks on FPGAs for Real-Time Particle Reconstruction in High Energy Physics
Authors: Yutaro Iiyama, Gianluca Cerminara, Abhijay Gupta, Jan Kieseler, Vladimir Loncar, Maurizio Pierini, Shah Rukh Qasim, Marcel Rieger, Sioni Summers, Gerrit Van Onsem, Kinga Wozniak, Jennifer Ngadiuba, Giuseppe Di Guglielmo, Javier Duarte, Philip Harris, Dylan Rankin, Sergo Jindariani, Mia Liu, Kevin Pedro, Nhan Tran, Edward Kreinar, Zhenbin Wu
Abstract: Graph neural networks have been shown to achieve excellent performance for several crucial tasks in particle physics, such as charged particle tracking, jet tagging, and clustering. An important domain for the application of these networks is the FPGA-based first layer of real-time data filtering at the CERN Large Hadron Collider, which has strict latency and resource constraints. We discuss how to design distance-weighted graph networks that can be executed with a latency of less than 1 $\mu\mathrm{s}$ on an FPGA. To do so, we consider a representative task associated with particle reconstruction and identification in a next-generation calorimeter operating at a particle collider. We use a graph network architecture developed for such purposes, and apply additional simplifications to match the computing constraints of Level-1 trigger systems, including weight quantization. Using the $\mathtt{hls4ml}$ library, we convert the compressed models into firmware to be implemented on an FPGA. Performance of the synthesized models is presented both in terms of inference accuracy and resource usage.
Submitted 3 February, 2021; v1 submitted 8 August, 2020; originally announced August 2020.
Comments: 15 pages, 4 figures
Report number: FERMILAB-PUB-20-405-E-SCD
Journal ref: Frontiers in Big Data 3 (2021) 44

arXiv:2007.10359  [pdf, other]  physics.comp-ph, cs.DC, hep-ex, physics.data-an, physics.ins-det
DOI: 10.1088/2632-2153/abec21
GPU coprocessors as a service for deep learning inference in high energy physics
Authors: Jeffrey Krupa, Kelvin Lin, Maria Acosta Flechas, Jack Dinsmore, Javier Duarte, Philip Harris, Scott Hauck, Burt Holzman, Shih-Chieh Hsu, Thomas Klijnsma, Mia Liu, Kevin Pedro, Dylan Rankin, Natchanon Suaysom, Matt Trahms, Nhan Tran
Abstract: In the next decade, the demands for computing in large scientific experiments are expected to grow tremendously. During the same time period, CPU performance increases will be limited. At the CERN Large Hadron Collider (LHC), these two issues will confront one another as the collider is upgraded for high luminosity running. Alternative processors such as graphics processing units (GPUs) can resolve this confrontation provided that algorithms can be sufficiently accelerated. In many cases, algorithmic speedups are found to be largest through the adoption of deep learning algorithms. We present a comprehensive exploration of the use of GPU-based hardware acceleration for deep learning inference within the data reconstruction workflow of high energy physics. We present several realistic examples and discuss a strategy for the seamless integration of coprocessors so that the LHC can maintain, if not exceed, its current performance throughout its running.
Submitted 23 April, 2021; v1 submitted 20 July, 2020; originally announced July 2020.
Comments: 26 pages, 7 figures, 2 tables
Report number: FERMILAB-PUB-20-338-E-SCD
Journal ref: Mach. Learn.: Sci. Technol. 2 (2021) 035005

arXiv:2004.00606  [pdf, ps, other]  math.CO, cs.DM
Tipsy cop and drunken robber: a variant of the cop and robber game on graphs
Authors: Pamela Harris, Erik Insko, Alicia Prieto-Langarica, Rade Stoisavljevic, Shaun Sullivan
Abstract: Motivated by a biological scenario illustrated in the YouTube video https://www.youtube.com/watch?v=Z_mXDvZQ6dU, where a neutrophil chases a bacteria cell moving in random directions, we present a variant of the cop and robber game on graphs called the tipsy cop and drunken robber game. In this game, we place a tipsy cop and a drunken robber at different vertices of a finite connected graph $G$. The game consists of independent moves: the robber begins the game by moving to an adjacent vertex from where he began; this is then followed by the cop moving to an adjacent vertex from where she began. Since the robber is inebriated, he takes random walks on the graph, while the cop being tipsy means that her movements are sometimes random and sometimes intentional. Our main results give formulas for the probability that the robber is still free from capture after $m$ moves of this game on highly symmetric graphs, such as the complete graphs, complete bipartite graphs, and cycle graphs. We also give the expected encounter time between the cop and robber for these families of graphs. We end the manuscript by presenting a general method for computing such probabilities and also detail a variety of directions for future research.
Submitted 1 April, 2020; originally announced April 2020.
Comments: 18 pages
MSC Class: 05A05; 05C25; 05C30; 05C78; 05C85
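
The paper derives exact formulas for these probabilities; as a quick illustration of the game itself, the sketch below estimates the robber's survival probability after m rounds on a cycle graph by Monte Carlo. The rule used for the cop's "intentional" moves (step toward the robber with probability p, otherwise at random) is an assumption made for this sketch, not necessarily the paper's exact formulation.

    # Monte Carlo sketch of a tipsy-cop / drunken-robber game on the cycle C_n.
    import random

    def survival_probability(n=20, m=30, p=0.5, trials=50_000):
        """Estimate P(robber still free after m rounds) on the cycle graph C_n."""
        free = 0
        for _ in range(trials):
            robber, cop = 0, n // 2                  # start at distinct vertices
            caught = False
            for _ in range(m):
                robber = (robber + random.choice((-1, 1))) % n   # drunken move
                if robber == cop:
                    caught = True
                    break
                if random.random() < p:              # tipsy cop: intentional move
                    d_cw = (robber - (cop + 1)) % n
                    d_ccw = (robber - (cop - 1)) % n
                    step = 1 if min(d_cw, n - d_cw) <= min(d_ccw, n - d_ccw) else -1
                else:                                # ... or a random move
                    step = random.choice((-1, 1))
                cop = (cop + step) % n
                if cop == robber:
                    caught = True
                    break
            if not caught:
                free += 1
        return free / trials

    print(survival_probability())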

arXiv:2003.06308  [pdf, other]  cs.LG, eess.SP, hep-ex
DOI: 10.1088/2632-2153/aba042
Compressing deep neural networks on FPGAs to binary and ternary precision with HLS4ML
Authors: Giuseppe Di Guglielmo, Javier Duarte, Philip Harris, Duc Hoang, Sergo Jindariani, Edward Kreinar, Mia Liu, Vladimir Loncar, Jennifer Ngadiuba, Kevin Pedro, Maurizio Pierini, Dylan Rankin, Sheila Sagear, Sioni Summers, Nhan Tran, Zhenbin Wu
Abstract: We present the implementation of binary and ternary neural networks in the hls4ml library, designed to automatically convert deep neural network models to digital circuits with FPGA firmware. Starting from benchmark models trained with floating point precision, we investigate different strategies to reduce the network's resource consumption by reducing the numerical precision of the network parameters to binary or ternary. We discuss the trade-off between model accuracy and resource consumption. In addition, we show how to balance between latency and accuracy by retaining full precision on a selected subset of network components. As an example, we consider two multiclass classification tasks: handwritten digit recognition with the MNIST data set and jet identification with simulated proton-proton collisions at the CERN Large Hadron Collider. The binary and ternary implementation has similar performance to the higher precision implementation while using drastically fewer FPGA resources.
Submitted 29 June, 2020; v1 submitted 11 March, 2020; originally announced March 2020.
Comments: Update to MLST journal version
Report number: FERMILAB-PUB-20-167-PPD-SCD
Journal ref: Mach. Learn.: Sci. Technol. 2, 015001 (2020)
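
The mechanism this entry builds on, lowering the numerical precision of network parameters at conversion time, can be sketched with the hls4ml configuration dictionary as below. This only illustrates the generic per-layer precision knob; the dedicated binary/ternary implementations described in the paper are not reproduced here, and the model file, layer name, and FPGA part are placeholders (the nested Precision keys may also vary between library versions).

    # Sketch: reduce fixed-point precision of one layer before hls4ml conversion.
    import hls4ml
    from tensorflow.keras.models import load_model

    model = load_model("benchmark_model.h5")   # placeholder trained model

    config = hls4ml.utils.config_from_keras_model(model, granularity="name")
    config["Model"]["Precision"] = "ap_fixed<16,6>"                          # default
    config["LayerName"]["dense_1"]["Precision"]["weight"] = "ap_fixed<2,1>"  # toy 2-bit weights

    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir="hls_prj",
        part="xcku115-flvb2104-2-i")
    hls_model.compile()   # C emulation, for checking accuracy after quantization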

arXiv:2002.02534  [pdf, other]  physics.comp-ph, astro-ph.IM, cs.LG, hep-ex
DOI: 10.1088/1748-0221/15/05/p05026
Fast inference of Boosted Decision Trees in FPGAs for particle physics
Authors: Sioni Summers, Giuseppe Di Guglielmo, Javier Duarte, Philip Harris, Duc Hoang, Sergo Jindariani, Edward Kreinar, Vladimir Loncar, Jennifer Ngadiuba, Maurizio Pierini, Dylan Rankin, Nhan Tran, Zhenbin Wu
Abstract: We describe the implementation of Boosted Decision Trees in the hls4ml library, which allows the translation of a trained model into FPGA firmware through an automated conversion process. Thanks to its fully on-chip implementation, hls4ml performs inference of Boosted Decision Tree models with extremely low latency. With a typical latency less than 100 ns, this solution is suitable for FPGA-based real-time processing, such as in the Level-1 Trigger system of a collider experiment. These developments open up prospects for physicists to deploy BDTs in FPGAs for identifying the origin of jets, better reconstructing the energies of muons, and enabling better selection of rare signal processes.
Submitted 19 February, 2020; v1 submitted 5 February, 2020; originally announced February 2020.
Journal ref: JINST 15 P05026 (2020)

arXiv:1911.05796  [pdf, ps, other]  astro-ph.IM, cs.AI, physics.soc-ph
Response to NITRD, NCO, NSF Request for Information on "Update to the 2016 National Artificial Intelligence Research and Development Strategic Plan"
Authors: J. Amundson, J. Annis, C. Avestruz, D. Bowring, J. Caldeira, G. Cerati, C. Chang, S. Dodelson, D. Elvira, A. Farahi, K. Genser, L. Gray, O. Gutsche, P. Harris, J. Kinney, J. B. Kowalkowski, R. Kutschke, S. Mrenna, B. Nord, A. Para, K. Pedro, G. N. Perdue, A. Scheinker, P. Spentzouris, J. St. John, et al. (5 additional authors not shown)
Abstract: We present a response to the 2018 Request for Information (RFI) from the NITRD, NCO, NSF regarding the "Update to the 2016 National Artificial Intelligence Research and Development Strategic Plan." Through this document, we provide a response to the question of whether and how the National Artificial Intelligence Research and Development Strategic Plan (NAIRDSP) should be updated from the perspective of Fermilab, America's premier national laboratory for High Energy Physics (HEP). We believe the NAIRDSP should be extended in light of the rapid pace of development and innovation in the field of Artificial Intelligence (AI) since 2016, and present our recommendations below. AI has profoundly impacted many areas of human life, promising to dramatically reshape society (e.g., economy, education, science) in the coming years. We are still early in this process. It is critical to invest now in this technology to ensure it is safe and deployed ethically. Science and society both have a strong need for accuracy, efficiency, transparency, and accountability in algorithms, making investments in scientific AI particularly valuable.
Thus far the US has been a leader in AI technologies, and we believe as a national Laboratory it is crucial to help maintain and extend this leadership. Moreover, investments in AI will be important for maintaining US leadership in the physical sciences.
Submitted 4 November, 2019; originally announced November 2019.
Report number: FERMILAB-FN-1092-SCD

arXiv:1804.06913  [pdf, other]  physics.ins-det, cs.CV, hep-ex, stat.ML
DOI: 10.1088/1748-0221/13/07/P07027
Fast inference of deep neural networks in FPGAs for particle physics
Authors: Javier Duarte, Song Han, Philip Harris, Sergo Jindariani, Edward Kreinar, Benjamin Kreis, Jennifer Ngadiuba, Maurizio Pierini, Ryan Rivera, Nhan Tran, Zhenbin Wu
Abstract: Recent results at the Large Hadron Collider (LHC) have pointed to enhanced physics capabilities through the improvement of the real-time event processing techniques.
Machine learning methods are ubiquitous and have proven to be very powerful in LHC physics, and particle physics as a whole. However, exploration of the use of such techniques in low-latency, low-power FPGA hardware has only just begun. FPGA-based trigger and data acquisition (DAQ) systems have extremely low, sub-microsecond latency requirements that are unique to particle physics. We present a case study for neural network inference in FPGAs focusing on a classifier for jet substructure which would enable, among many other physics scenarios, searches for new dark sector particles and novel measurements of the Higgs boson. While we focus on a specific example, the lessons are far-reaching. We develop a package based on High-Level Synthesis (HLS) called hls4ml to build machine learning models in FPGAs. The use of HLS increases accessibility across a broad user community and allows for a drastic decrease in firmware development time. We map out FPGA resource usage and latency versus neural network hyperparameters to identify the problems in particle physics that would benefit from performing neural network inference with FPGAs. For our example jet substructure model, we fit well within the available resources of modern FPGAs with a latency on the scale of 100 ns.
Submitted 28 June, 2018; v1 submitted 16 April, 2018; originally announced April 2018.
Comments: 22 pages, 17 figures, 2 tables, JINST revision
Report number: FERMILAB-PUB-18-089-E
Journal ref: JINST 13 P07027 (2018)
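
The hls4ml workflow summarized in this entry, converting a trained network into an HLS project and checking it against the original model, can be sketched as follows using the library's Python interface. The model file, input size, output directory, and FPGA part are placeholders, and hls_model.build() additionally requires the vendor HLS toolchain.

    # Sketch: Keras model -> hls4ml project -> fixed-point emulation check.
    import numpy as np
    import hls4ml
    from tensorflow.keras.models import load_model

    model = load_model("jet_substructure_classifier.h5")   # placeholder model

    config = hls4ml.utils.config_from_keras_model(model, granularity="model")
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir="hls4ml_prj",
        part="xcku115-flvb2104-2-i",   # example FPGA part
    )

    hls_model.compile()                                   # C simulation of the firmware
    x_test = np.random.rand(100, 16).astype(np.float32)   # stand-in for jet features
    print("max |keras - hls|:",
          np.max(np.abs(model.predict(x_test) - hls_model.predict(x_test))))

    # hls_model.build(csim=False)  # run HLS synthesis for latency/resource reports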

arXiv:1711.03477  [pdf, other]  cs.IT
DOI: 10.1109/LWC.2018.2799863
Achievable Rates and Training Overheads for a Measured LOS Massive MIMO Channel
Authors: Paul Harris, Wael Boukley Hasan, Liang Liu, Steffen Malkowsky, Mark Beach, Simon Armour, Fredrik Tufvesson, Ove Edfors
Abstract: This paper presents achievable uplink (UL) sumrate predictions for a measured line-of-sight (LOS) massive multiple-input, multiple-output (MIMO) (MMIMO) scenario and illustrates the trade-off between spatial multiplexing performance and channel de-coherence rate for an increasing number of base station (BS) antennas. In addition, an orthogonal frequency division multiplexing (OFDM) case study is formed which considers the 90% coherence time to evaluate the impact of MMIMO channel training overheads in high-speed LOS scenarios. It is shown that whilst 25% of the achievable zero-forcing (ZF) sumrate is lost when the resounding interval is increased by a factor of 4, the OFDM training overheads for a 100-antenna MMIMO BS using an LTE-like physical layer could be as low as 2% for a terminal speed of 90 m/s.
Submitted 22 February, 2018; v1 submitted 9 November, 2017; originally announced November 2017.
Comments: 4 pages, 5 figures
Journal ref: IEEE Wireless Communications Letters 2018
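
The "achievable ZF sumrate" quoted in this entry refers to the standard zero-forcing uplink sum rate. A minimal sketch of that computation on a synthetic channel (the measured LOS channels are not reproduced, and the antenna count, user count, and SNR below are illustrative) is:

    # Zero-forcing uplink sum rate for an M-antenna BS serving K users.
    import numpy as np

    def zf_uplink_sum_rate(H, snr_linear):
        """H: (M x K) channel matrix; returns the ZF sum rate in bits/s/Hz."""
        gram_inv = np.linalg.inv(H.conj().T @ H)            # (H^H H)^{-1}
        sinr = snr_linear / np.real(np.diag(gram_inv))      # post-ZF SINR per user
        return float(np.sum(np.log2(1.0 + sinr)))

    rng = np.random.default_rng(0)
    M, K = 100, 8                         # 100 BS antennas, 8 single-antenna users
    H = (rng.standard_normal((M, K)) + 1j * rng.standard_normal((M, K))) / np.sqrt(2)
    print(zf_uplink_sum_rate(H, snr_linear=10.0))           # 10 dB per-user SNR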

arXiv:1705.07540  [pdf, other]  cs.IT
DOI: 10.1049/ic.2016.0064
An Overview of Massive MIMO Research at the University of Bristol
Authors: Paul Harris, Wael Boukley Hasan, Henry Brice, Benny Chitambira, Mark Beach, Evangelos Mellios, Andrew Nix, Simon Armour, Angela Doufexi
Abstract: Massive MIMO has rapidly gained popularity as a technology crucial to the capacity advances required for 5G wireless systems. Since its theoretical conception six years ago, research activity has grown exponentially, and there is now a developing industrial interest to commercialise the technology. For this to happen effectively, we believe it is crucial that further pragmatic research is conducted with a view to establishing how reality differs from theoretical ideals. This paper presents an overview of the massive MIMO research activities occurring within the Communication Systems & Networks Group at the University of Bristol centred around our 128-antenna real-time testbed, which has been developed through the BIO programmable city initiative in collaboration with NI and Lund University. Through recent preliminary trials, we achieved a world first spectral efficiency of 79.4 bits/s/Hz, and subsequently demonstrated that this could be increased to 145.6 bits/s/Hz. We provide a summary of this work here along with some of our ongoing research directions such as large-scale array wave-front analysis, optimised power control and localisation techniques.
Submitted 21 May, 2017; originally announced May 2017.
Comments: Presented at the IET Radio Propagation and Technologies for 5G Conference (2016). 5 pages

arXiv:1703.04723  [pdf, other]  cs.IT
Temporal Analysis of Measured LOS Massive MIMO Channels with Mobility
Authors: Paul Harris, Steffen Malkowsky, Joao Vieira, Fredrik Tufvesson, Wael Boukley Hasan, Liang Liu, Mark Beach, Simon Armour, Ove Edfors
Abstract: The first measured results for massive multiple-input, multiple-output (MIMO) performance in a line-of-sight (LOS) scenario with moderate mobility are presented, with 8 users served by a 100-antenna base station (BS) at 3.7 GHz. When such a large number of channels dynamically change, the inherent propagation and processing delay has a critical relationship with the rate of change, as the use of outdated channel information can result in severe detection and precoding inaccuracies. For the downlink (DL) in particular, a time division duplex (TDD) configuration synonymous with massive MIMO deployments could mean only the uplink (UL) is usable in extreme cases. Therefore, it is of great interest to investigate the impact of mobility on massive MIMO performance and consider ways to combat the potential limitations. In a mobile scenario with moving cars and pedestrians, the correlation of the MIMO channel vector over time is inspected for vehicles moving up to 29 km/h.
For a 100-antenna system, it is found that the channel state information (CSI) update rate requirement may increase by 7 times when compared to an 8-antenna system, whilst the power control update rate could be decreased by at least 5 times relative to a single-antenna system.
Submitted 14 March, 2017; originally announced March 2017.
Comments: Accepted for presentation at the 85th IEEE Vehicular Technology Conference in Sydney. 5 pages. arXiv admin note: substantial text overlap with arXiv:1701.08818
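
The temporal analysis described in this entry tracks how quickly a user's channel vector decorrelates. A minimal sketch of one common metric, the normalized inner product between channel snapshots separated by a time lag, is shown below on a toy autoregressive channel; the measured 100-antenna data and the paper's exact metric are not reproduced.

    # Temporal correlation of a user's channel vector versus time lag.
    import numpy as np

    def temporal_correlation(h, lag):
        """h: (T snapshots x M antennas) complex channel vectors; lag >= 1."""
        h0, h1 = h[:-lag], h[lag:]
        num = np.abs(np.sum(h0.conj() * h1, axis=1))
        den = np.linalg.norm(h0, axis=1) * np.linalg.norm(h1, axis=1)
        return float(np.mean(num / den))

    rng = np.random.default_rng(1)
    T, M, alpha = 500, 100, 0.98          # snapshots, antennas, AR(1) coefficient
    h = np.zeros((T, M), dtype=complex)
    h[0] = rng.standard_normal(M) + 1j * rng.standard_normal(M)
    for t in range(1, T):
        w = rng.standard_normal(M) + 1j * rng.standard_normal(M)
        h[t] = alpha * h[t - 1] + np.sqrt(1 - alpha**2) * w

    for lag in (1, 10, 50):
        print(lag, round(temporal_correlation(h, lag), 3))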

arXiv:1701.08818  [pdf, other]  cs.IT
DOI: 10.1109/JSAC.2017.2686678
Performance Characterization of a Real-Time Massive MIMO System with LOS Mobile Channels
Authors: Paul Harris, Steffen Malkowsky, Joao Vieira, Fredrik Tufvesson, Wael Boukley Hassan, Liang Liu, Mark Beach, Simon Armour, Ove Edfors
Abstract: The first measured results for massive MIMO performance in a line-of-sight (LOS) scenario with moderate mobility are presented, with 8 users served in real-time using a 100-antenna base station (BS) at 3.7 GHz. When such a large number of channels dynamically change, the inherent propagation and processing delay has a critical relationship with the rate of change, as the use of outdated channel information can result in severe detection and precoding inaccuracies. For the downlink (DL) in particular, a time division duplex (TDD) configuration synonymous with massive multiple-input, multiple-output (MIMO) deployments could mean only the uplink (UL) is usable in extreme cases. Therefore, it is of great interest to investigate the impact of mobility on massive MIMO performance and consider ways to combat the potential limitations. In a mobile scenario with moving cars and pedestrians, the massive MIMO channel is sampled across many points in space to build a picture of the overall user orthogonality, and the impact of both azimuth and elevation array configurations is considered. Temporal analysis is also conducted for vehicles moving up to 29 km/h and real-time bit error rates (BERs) for both the UL and DL without power control are presented. For a 100-antenna system, it is found that the channel state information (CSI) update rate requirement may increase by 7 times when compared to an 8-antenna system, whilst the power control update rate could be decreased by at least 5 times relative to a single-antenna system.
Submitted 19 May, 2017; v1 submitted 30 January, 2017; originally announced January 2017.
Comments: Submitted to the 2017 IEEE JSAC Special Issue on Deployment Issues and Performance Challenges for 5G, IEEE Journal on Selected Areas in Communications, 2017, vol. PP, no. 99, pp. 1-1

arXiv:1701.01161  [pdf, other]  cs.IT
The World's First Real-Time Testbed for Massive MIMO: Design, Implementation, and Validation
Authors: Steffen Malkowsky, Joao Vieira, Liang Liu, Paul Harris, Karl Nieman, Nikhil Kundargi, Ian Wong, Fredrik Tufvesson, Viktor Öwall, Ove Edfors
Abstract: This paper sets up a framework for designing a massive multiple-input multiple-output (MIMO) testbed by investigating hardware (HW) and system-level requirements such as processing complexity, duplexing mode and frame structure. Taking these into account, a generic system and processing partitioning is proposed which allows flexible scaling and processing distribution onto a multitude of physically separated devices. Based on the given HW constraints, such as the maximum number of links and the maximum throughput for peer-to-peer interconnections combined with processing capabilities, the framework allows the evaluation of modular HW components.
To verify our design approach, we present the LuMaMi (Lund University Massive MIMO) testbed, which constitutes the first reconfigurable real-time HW platform for prototyping massive MIMO. Utilizing up to 100 base station antennas and more than 50 Field Programmable Gate Arrays, up to 12 user equipments are served on the same time/frequency resource using an LTE-like Orthogonal Frequency Division Multiplexing, time-division duplex based transmission scheme. Proof-of-concept tests with this system show that massive MIMO can simultaneously serve a multitude of users in static indoor and static outdoor environments while utilizing the same time/frequency resource.

Submitted 16 May, 2017; v1 submitted 20 December, 2016; originally announced January 2017.
Comments: 15 pages, accepted for publication in IEEE Access
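The dimensions quoted above (100 base station antennas serving 12 users on the same time/frequency resource) give a feel for the per-subcarrier matrix processing that such a testbed has to distribute across its FPGAs. The snippet below is a minimal numpy sketch of linear zero-forcing uplink detection at those dimensions; it is an illustrative example only and does not reproduce the LuMaMi processing chain described in the paper.

```python
# Minimal per-subcarrier uplink detection sketch at the dimensions quoted
# above (M = 100 BS antennas, K = 12 single-antenna users).
# Illustrative zero-forcing example, not the testbed's actual FPGA design.
import numpy as np

M, K = 100, 12
rng = np.random.default_rng(0)

# i.i.d. Rayleigh channel for one OFDM subcarrier
H = (rng.standard_normal((M, K)) + 1j * rng.standard_normal((M, K))) / np.sqrt(2)

# QPSK symbols transmitted by the K users
x = rng.choice([1 + 1j, 1 - 1j, -1 + 1j, -1 - 1j], size=K) / np.sqrt(2)
noise = 0.05 * (rng.standard_normal(M) + 1j * rng.standard_normal(M))
y = H @ x + noise                      # signal received across the 100 antennas

# Zero-forcing detection: x_hat = (H^H H)^{-1} H^H y, i.e. the pseudo-inverse
x_hat = np.linalg.pinv(H) @ y

print(np.round(np.abs(x_hat - x), 3))  # per-user residual error, close to zero
```

With many more antennas than users, even simple linear detection separates the users well, which is one reason this kind of processing can be partitioned per subcarrier and run in real time on distributed hardware.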
