CINXE.COM
Search | arXiv e-print repository
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–17 of 17 results for author: <span class="mathjax">Pedro, K</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Pedro%2C+K">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Pedro, K"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Pedro%2C+K&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Pedro, K"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.21611">arXiv:2410.21611</a> <span> [<a href="https://arxiv.org/pdf/2410.21611">pdf</a>, <a href="https://arxiv.org/format/2410.21611">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> </div> <p class="title is-5 mathjax"> CaloChallenge 2022: A Community Challenge for Fast Calorimeter Simulation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Krause%2C+C">Claudius Krause</a>, <a href="/search/cs?searchtype=author&query=Giannelli%2C+M+F">Michele Faucci Giannelli</a>, <a href="/search/cs?searchtype=author&query=Kasieczka%2C+G">Gregor Kasieczka</a>, <a href="/search/cs?searchtype=author&query=Nachman%2C+B">Benjamin Nachman</a>, <a href="/search/cs?searchtype=author&query=Salamani%2C+D">Dalila Salamani</a>, <a href="/search/cs?searchtype=author&query=Shih%2C+D">David Shih</a>, <a href="/search/cs?searchtype=author&query=Zaborowska%2C+A">Anna Zaborowska</a>, <a href="/search/cs?searchtype=author&query=Amram%2C+O">Oz Amram</a>, <a href="/search/cs?searchtype=author&query=Borras%2C+K">Kerstin Borras</a>, <a href="/search/cs?searchtype=author&query=Buckley%2C+M+R">Matthew R. Buckley</a>, <a href="/search/cs?searchtype=author&query=Buhmann%2C+E">Erik Buhmann</a>, <a href="/search/cs?searchtype=author&query=Buss%2C+T">Thorsten Buss</a>, <a href="/search/cs?searchtype=author&query=Cardoso%2C+R+P+D+C">Renato Paulo Da Costa Cardoso</a>, <a href="/search/cs?searchtype=author&query=Caterini%2C+A+L">Anthony L. Caterini</a>, <a href="/search/cs?searchtype=author&query=Chernyavskaya%2C+N">Nadezda Chernyavskaya</a>, <a href="/search/cs?searchtype=author&query=Corchia%2C+F+A+G">Federico A. G. Corchia</a>, <a href="/search/cs?searchtype=author&query=Cresswell%2C+J+C">Jesse C. Cresswell</a>, <a href="/search/cs?searchtype=author&query=Diefenbacher%2C+S">Sascha Diefenbacher</a>, <a href="/search/cs?searchtype=author&query=Dreyer%2C+E">Etienne Dreyer</a>, <a href="/search/cs?searchtype=author&query=Ekambaram%2C+V">Vijay Ekambaram</a>, <a href="/search/cs?searchtype=author&query=Eren%2C+E">Engin Eren</a>, <a href="/search/cs?searchtype=author&query=Ernst%2C+F">Florian Ernst</a>, <a href="/search/cs?searchtype=author&query=Favaro%2C+L">Luigi Favaro</a>, <a href="/search/cs?searchtype=author&query=Franchini%2C+M">Matteo Franchini</a>, <a href="/search/cs?searchtype=author&query=Gaede%2C+F">Frank Gaede</a> , et al. (44 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.21611v1-abstract-short" style="display: inline;"> We present the results of the "Fast Calorimeter Simulation Challenge 2022" - the CaloChallenge. We study state-of-the-art generative models on four calorimeter shower datasets of increasing dimensionality, ranging from a few hundred voxels to a few tens of thousand voxels. The 31 individual submissions span a wide range of current popular generative architectures, including Variational AutoEncoder… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21611v1-abstract-full').style.display = 'inline'; document.getElementById('2410.21611v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.21611v1-abstract-full" style="display: none;"> We present the results of the "Fast Calorimeter Simulation Challenge 2022" - the CaloChallenge. We study state-of-the-art generative models on four calorimeter shower datasets of increasing dimensionality, ranging from a few hundred voxels to a few tens of thousand voxels. The 31 individual submissions span a wide range of current popular generative architectures, including Variational AutoEncoders (VAEs), Generative Adversarial Networks (GANs), Normalizing Flows, Diffusion models, and models based on Conditional Flow Matching. We compare all submissions in terms of quality of generated calorimeter showers, as well as shower generation time and model size. To assess the quality we use a broad range of different metrics including differences in 1-dimensional histograms of observables, KPD/FPD scores, AUCs of binary classifiers, and the log-posterior of a multiclass classifier. The results of the CaloChallenge provide the most complete and comprehensive survey of cutting-edge approaches to calorimeter fast simulation to date. In addition, our work provides a uniquely detailed perspective on the important problem of how to evaluate generative models. As such, the results presented here should be applicable for other domains that use generative AI and require fast and faithful generation of samples in a large phase space. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21611v1-abstract-full').style.display = 'none'; document.getElementById('2410.21611v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">204 pages, 100+ figures, 30+ tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> HEPHY-ML-24-05, FERMILAB-PUB-24-0728-CMS, TTK-24-43 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.02005">arXiv:2302.02005</a> <span> [<a href="https://arxiv.org/pdf/2302.02005">pdf</a>, <a href="https://arxiv.org/format/2302.02005">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DeepAstroUDA: Semi-Supervised Universal Domain Adaptation for Cross-Survey Galaxy Morphology Classification and Anomaly Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=%C4%86iprijanovi%C4%87%2C+A">A. 膯iprijanovi膰</a>, <a href="/search/cs?searchtype=author&query=Lewis%2C+A">A. Lewis</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">K. Pedro</a>, <a href="/search/cs?searchtype=author&query=Madireddy%2C+S">S. Madireddy</a>, <a href="/search/cs?searchtype=author&query=Nord%2C+B">B. Nord</a>, <a href="/search/cs?searchtype=author&query=Perdue%2C+G+N">G. N. Perdue</a>, <a href="/search/cs?searchtype=author&query=Wild%2C+S+M">S. M. Wild</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.02005v2-abstract-short" style="display: inline;"> Artificial intelligence methods show great promise in increasing the quality and speed of work with large astronomical datasets, but the high complexity of these methods leads to the extraction of dataset-specific, non-robust features. Therefore, such methods do not generalize well across multiple datasets. We present a universal domain adaptation method, \textit{DeepAstroUDA}, as an approach to o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.02005v2-abstract-full').style.display = 'inline'; document.getElementById('2302.02005v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.02005v2-abstract-full" style="display: none;"> Artificial intelligence methods show great promise in increasing the quality and speed of work with large astronomical datasets, but the high complexity of these methods leads to the extraction of dataset-specific, non-robust features. Therefore, such methods do not generalize well across multiple datasets. We present a universal domain adaptation method, \textit{DeepAstroUDA}, as an approach to overcome this challenge. This algorithm performs semi-supervised domain adaptation and can be applied to datasets with different data distributions and class overlaps. Non-overlapping classes can be present in any of the two datasets (the labeled source domain, or the unlabeled target domain), and the method can even be used in the presence of unknown classes. We apply our method to three examples of galaxy morphology classification tasks of different complexities ($3$-class and $10$-class problems), with anomaly detection: 1) datasets created after different numbers of observing years from a single survey (LSST mock data of $1$ and $10$ years of observations); 2) data from different surveys (SDSS and DECaLS); and 3) data from observing fields with different depths within one survey (wide field and Stripe 82 deep field of SDSS). For the first time, we demonstrate the successful use of domain adaptation between very discrepant observational datasets. \textit{DeepAstroUDA} is capable of bridging the gap between two astronomical surveys, increasing classification accuracy in both domains (up to $40\%$ on the unlabeled data), and making model performance consistent across datasets. Furthermore, our method also performs well as an anomaly detection algorithm and successfully clusters unknown class samples even in the unlabeled target dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.02005v2-abstract-full').style.display = 'none'; document.getElementById('2302.02005v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in Machine Learning Science and Technology (MLST); 24 pages, 14 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-23-034-CSAID </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.04633">arXiv:2301.04633</a> <span> [<a href="https://arxiv.org/pdf/2301.04633">pdf</a>, <a href="https://arxiv.org/ps/2301.04633">ps</a>, <a href="https://arxiv.org/format/2301.04633">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Analysis, Statistics and Probability">physics.data-an</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/s41781-023-00101-0">10.1007/s41781-023-00101-0 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Accelerating Machine Learning Inference with GPUs in ProtoDUNE Data Processing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Cai%2C+T">Tejin Cai</a>, <a href="/search/cs?searchtype=author&query=Herner%2C+K">Kenneth Herner</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+T">Tingjun Yang</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+M">Michael Wang</a>, <a href="/search/cs?searchtype=author&query=Flechas%2C+M+A">Maria Acosta Flechas</a>, <a href="/search/cs?searchtype=author&query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&query=Holzman%2C+B">Burt Holzman</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+N">Nhan Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.04633v2-abstract-short" style="display: inline;"> We study the performance of a cloud-based GPU-accelerated inference server to speed up event reconstruction in neutrino data batch jobs. Using detector data from the ProtoDUNE experiment and employing the standard DUNE grid job submission tools, we attempt to reprocess the data by running several thousand concurrent grid jobs, a rate we expect to be typical of current and future neutrino physics e… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.04633v2-abstract-full').style.display = 'inline'; document.getElementById('2301.04633v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.04633v2-abstract-full" style="display: none;"> We study the performance of a cloud-based GPU-accelerated inference server to speed up event reconstruction in neutrino data batch jobs. Using detector data from the ProtoDUNE experiment and employing the standard DUNE grid job submission tools, we attempt to reprocess the data by running several thousand concurrent grid jobs, a rate we expect to be typical of current and future neutrino physics experiments. We process most of the dataset with the GPU version of our processing algorithm and the remainder with the CPU version for timing comparisons. We find that a 100-GPU cloud-based server is able to easily meet the processing demand, and that using the GPU version of the event processing algorithm is two times faster than processing these data with the CPU version when comparing to the newest CPUs in our sample. The amount of data transferred to the inference server during the GPU runs can overwhelm even the highest-bandwidth network switches, however, unless care is taken to observe network facility limits or otherwise distribute the jobs to multiple sites. We discuss the lessons learned from this processing campaign and several avenues for future improvements. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.04633v2-abstract-full').style.display = 'none'; document.getElementById('2301.04633v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 9 figures, matches accepted version</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-22-944-ND-PPD-SCD </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Comput Softw Big Sci 7, 11 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.00677">arXiv:2211.00677</a> <span> [<a href="https://arxiv.org/pdf/2211.00677">pdf</a>, <a href="https://arxiv.org/format/2211.00677">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Semi-Supervised Domain Adaptation for Cross-Survey Galaxy Morphology Classification and Anomaly Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=%C4%86iprijanovi%C4%87%2C+A">Aleksandra 膯iprijanovi膰</a>, <a href="/search/cs?searchtype=author&query=Lewis%2C+A">Ashia Lewis</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Madireddy%2C+S">Sandeep Madireddy</a>, <a href="/search/cs?searchtype=author&query=Nord%2C+B">Brian Nord</a>, <a href="/search/cs?searchtype=author&query=Perdue%2C+G+N">Gabriel N. Perdue</a>, <a href="/search/cs?searchtype=author&query=Wild%2C+S+M">Stefan M. Wild</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.00677v3-abstract-short" style="display: inline;"> In the era of big astronomical surveys, our ability to leverage artificial intelligence algorithms simultaneously for multiple datasets will open new avenues for scientific discovery. Unfortunately, simply training a deep neural network on images from one data domain often leads to very poor performance on any other dataset. Here we develop a Universal Domain Adaptation method DeepAstroUDA, capabl… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.00677v3-abstract-full').style.display = 'inline'; document.getElementById('2211.00677v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.00677v3-abstract-full" style="display: none;"> In the era of big astronomical surveys, our ability to leverage artificial intelligence algorithms simultaneously for multiple datasets will open new avenues for scientific discovery. Unfortunately, simply training a deep neural network on images from one data domain often leads to very poor performance on any other dataset. Here we develop a Universal Domain Adaptation method DeepAstroUDA, capable of performing semi-supervised domain alignment that can be applied to datasets with different types of class overlap. Extra classes can be present in any of the two datasets, and the method can even be used in the presence of unknown classes. For the first time, we demonstrate the successful use of domain adaptation on two very different observational datasets (from SDSS and DECaLS). We show that our method is capable of bridging the gap between two astronomical surveys, and also performs well for anomaly detection and clustering of unknown data in the unlabeled dataset. We apply our model to two examples of galaxy morphology classification tasks with anomaly detection: 1) classifying spiral and elliptical galaxies with detection of merging galaxies (three classes including one unknown anomaly class); 2) a more granular problem where the classes describe more detailed morphological properties of galaxies, with the detection of gravitational lenses (ten classes including one unknown anomaly class). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.00677v3-abstract-full').style.display = 'none'; document.getElementById('2211.00677v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">3 figures, 1 table; accepted to Machine Learning and the Physical Sciences - Workshop at the 36th conference on Neural Information Processing Systems (NeurIPS)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-CONF-22-791-SCD </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.16255">arXiv:2203.16255</a> <span> [<a href="https://arxiv.org/pdf/2203.16255">pdf</a>, <a href="https://arxiv.org/format/2203.16255">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="General Relativity and Quantum Cosmology">gr-qc</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> </div> <p class="title is-5 mathjax"> Physics Community Needs, Tools, and Resources for Machine Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&query=Katsavounidis%2C+E">Erik Katsavounidis</a>, <a href="/search/cs?searchtype=author&query=McCormack%2C+W+P">William Patrick McCormack</a>, <a href="/search/cs?searchtype=author&query=Rankin%2C+D">Dylan Rankin</a>, <a href="/search/cs?searchtype=author&query=Feng%2C+Y">Yongbin Feng</a>, <a href="/search/cs?searchtype=author&query=Gandrakota%2C+A">Abhijith Gandrakota</a>, <a href="/search/cs?searchtype=author&query=Herwig%2C+C">Christian Herwig</a>, <a href="/search/cs?searchtype=author&query=Holzman%2C+B">Burt Holzman</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+N">Nhan Tran</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+T">Tingjun Yang</a>, <a href="/search/cs?searchtype=author&query=Ngadiuba%2C+J">Jennifer Ngadiuba</a>, <a href="/search/cs?searchtype=author&query=Coughlin%2C+M">Michael Coughlin</a>, <a href="/search/cs?searchtype=author&query=Hauck%2C+S">Scott Hauck</a>, <a href="/search/cs?searchtype=author&query=Hsu%2C+S">Shih-Chieh Hsu</a>, <a href="/search/cs?searchtype=author&query=Khoda%2C+E+E">Elham E Khoda</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+D">Deming Chen</a>, <a href="/search/cs?searchtype=author&query=Neubauer%2C+M">Mark Neubauer</a>, <a href="/search/cs?searchtype=author&query=Duarte%2C+J">Javier Duarte</a>, <a href="/search/cs?searchtype=author&query=Karagiorgi%2C+G">Georgia Karagiorgi</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+M">Mia Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.16255v1-abstract-short" style="display: inline;"> Machine learning (ML) is becoming an increasingly important component of cutting-edge physics research, but its computational requirements present significant challenges. In this white paper, we discuss the needs of the physics community regarding ML across latency and throughput regimes, the tools and resources that offer the possibility of addressing these needs, and how these can be best utiliz… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.16255v1-abstract-full').style.display = 'inline'; document.getElementById('2203.16255v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.16255v1-abstract-full" style="display: none;"> Machine learning (ML) is becoming an increasingly important component of cutting-edge physics research, but its computational requirements present significant challenges. In this white paper, we discuss the needs of the physics community regarding ML across latency and throughput regimes, the tools and resources that offer the possibility of addressing these needs, and how these can be best utilized and accessed in the coming years. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.16255v1-abstract-full').style.display = 'none'; document.getElementById('2203.16255v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Contribution to Snowmass 2021, 33 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.08806">arXiv:2203.08806</a> <span> [<a href="https://arxiv.org/pdf/2203.08806">pdf</a>, <a href="https://arxiv.org/format/2203.08806">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> </div> <p class="title is-5 mathjax"> New directions for surrogate models and differentiable programming for High Energy Physics detector simulation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Adelmann%2C+A">Andreas Adelmann</a>, <a href="/search/cs?searchtype=author&query=Hopkins%2C+W">Walter Hopkins</a>, <a href="/search/cs?searchtype=author&query=Kourlitis%2C+E">Evangelos Kourlitis</a>, <a href="/search/cs?searchtype=author&query=Kagan%2C+M">Michael Kagan</a>, <a href="/search/cs?searchtype=author&query=Kasieczka%2C+G">Gregor Kasieczka</a>, <a href="/search/cs?searchtype=author&query=Krause%2C+C">Claudius Krause</a>, <a href="/search/cs?searchtype=author&query=Shih%2C+D">David Shih</a>, <a href="/search/cs?searchtype=author&query=Mikuni%2C+V">Vinicius Mikuni</a>, <a href="/search/cs?searchtype=author&query=Nachman%2C+B">Benjamin Nachman</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Winklehner%2C+D">Daniel Winklehner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.08806v1-abstract-short" style="display: inline;"> The computational cost for high energy physics detector simulation in future experimental facilities is going to exceed the current available resources. To overcome this challenge, new ideas on surrogate models using machine learning methods are being explored to replace computationally expensive components. Additionally, differentiable programming has been proposed as a complementary approach, pr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.08806v1-abstract-full').style.display = 'inline'; document.getElementById('2203.08806v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.08806v1-abstract-full" style="display: none;"> The computational cost for high energy physics detector simulation in future experimental facilities is going to exceed the current available resources. To overcome this challenge, new ideas on surrogate models using machine learning methods are being explored to replace computationally expensive components. Additionally, differentiable programming has been proposed as a complementary approach, providing controllable and scalable simulation routines. In this document, new and ongoing efforts for surrogate models and differential programming applied to detector simulation are discussed in the context of the 2021 Particle Physics Community Planning Exercise (`Snowmass'). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.08806v1-abstract-full').style.display = 'none'; document.getElementById('2203.08806v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">contribution to Snowmass 2021</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-CONF-22-199-SCD </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.14299">arXiv:2112.14299</a> <span> [<a href="https://arxiv.org/pdf/2112.14299">pdf</a>, <a href="https://arxiv.org/format/2112.14299">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DeepAdversaries: Examining the Robustness of Deep Learning Models for Galaxy Morphology Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=%C4%86iprijanovi%C4%87%2C+A">Aleksandra 膯iprijanovi膰</a>, <a href="/search/cs?searchtype=author&query=Kafkes%2C+D">Diana Kafkes</a>, <a href="/search/cs?searchtype=author&query=Snyder%2C+G">Gregory Snyder</a>, <a href="/search/cs?searchtype=author&query=S%C3%A1nchez%2C+F+J">F. Javier S谩nchez</a>, <a href="/search/cs?searchtype=author&query=Perdue%2C+G+N">Gabriel Nathan Perdue</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Nord%2C+B">Brian Nord</a>, <a href="/search/cs?searchtype=author&query=Madireddy%2C+S">Sandeep Madireddy</a>, <a href="/search/cs?searchtype=author&query=Wild%2C+S+M">Stefan M. Wild</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.14299v3-abstract-short" style="display: inline;"> With increased adoption of supervised deep learning methods for processing and analysis of cosmological survey data, the assessment of data perturbation effects (that can naturally occur in the data processing and analysis pipelines) and the development of methods that increase model robustness are increasingly important. In the context of morphological classification of galaxies, we study the eff… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.14299v3-abstract-full').style.display = 'inline'; document.getElementById('2112.14299v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.14299v3-abstract-full" style="display: none;"> With increased adoption of supervised deep learning methods for processing and analysis of cosmological survey data, the assessment of data perturbation effects (that can naturally occur in the data processing and analysis pipelines) and the development of methods that increase model robustness are increasingly important. In the context of morphological classification of galaxies, we study the effects of perturbations in imaging data. In particular, we examine the consequences of using neural networks when training on baseline data and testing on perturbed data. We consider perturbations associated with two primary sources: 1) increased observational noise as represented by higher levels of Poisson noise and 2) data processing noise incurred by steps such as image compression or telescope errors as represented by one-pixel adversarial attacks. We also test the efficacy of domain adaptation techniques in mitigating the perturbation-driven errors. We use classification accuracy, latent space visualizations, and latent space distance to assess model robustness. Without domain adaptation, we find that processing pixel-level errors easily flip the classification into an incorrect class and that higher observational noise makes the model trained on low-noise data unable to classify galaxy morphologies. On the other hand, we show that training with domain adaptation improves model robustness and mitigates the effects of these perturbations, improving the classification accuracy by 23% on data with higher observational noise. Domain adaptation also increases by a factor of ~2.3 the latent space distance between the baseline and the incorrectly classified one-pixel perturbed image, making the model more robust to inadvertent perturbations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.14299v3-abstract-full').style.display = 'none'; document.getElementById('2112.14299v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 6 figures, 5 tables; accepted in MLST</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-21-767-SCD </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.02864">arXiv:2112.02864</a> <span> [<a href="https://arxiv.org/pdf/2112.02864">pdf</a>, <a href="https://arxiv.org/format/2112.02864">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/JHEP02(2022)074">10.1007/JHEP02(2022)074 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Autoencoders for Semivisible Jet Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Canelli%2C+F">Florencia Canelli</a>, <a href="/search/cs?searchtype=author&query=de+Cosa%2C+A">Annapaola de Cosa</a>, <a href="/search/cs?searchtype=author&query=Pottier%2C+L+L">Luc Le Pottier</a>, <a href="/search/cs?searchtype=author&query=Niedziela%2C+J">Jeremi Niedziela</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Pierini%2C+M">Maurizio Pierini</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.02864v3-abstract-short" style="display: inline;"> The production of dark matter particles from confining dark sectors may lead to many novel experimental signatures. Depending on the details of the theory, dark quark production in proton-proton collisions could result in semivisible jets of particles: collimated sprays of dark hadrons of which only some are detectable by particle collider experiments. The experimental signature is characterised b… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.02864v3-abstract-full').style.display = 'inline'; document.getElementById('2112.02864v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.02864v3-abstract-full" style="display: none;"> The production of dark matter particles from confining dark sectors may lead to many novel experimental signatures. Depending on the details of the theory, dark quark production in proton-proton collisions could result in semivisible jets of particles: collimated sprays of dark hadrons of which only some are detectable by particle collider experiments. The experimental signature is characterised by the presence of reconstructed missing momentum collinear with the visible components of the jets. This complex topology is sensitive to detector inefficiencies and mis-reconstruction that generate artificial missing momentum. With this work, we propose a signal-agnostic strategy to reject ordinary jets and identify semivisible jets via anomaly detection techniques. A deep neural autoencoder network with jet substructure variables as input proves highly useful for analyzing anomalous jets. The study focuses on the semivisible jet signature; however, the technique can apply to any new physics model that predicts signatures with anomalous jets from non-SM particles. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.02864v3-abstract-full').style.display = 'none'; document.getElementById('2112.02864v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 10 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-21-653-CMS </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Journal of High Energy Physics volume 2022, Article number: 74 (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.00961">arXiv:2111.00961</a> <span> [<a href="https://arxiv.org/pdf/2111.00961">pdf</a>, <a href="https://arxiv.org/format/2111.00961">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Robustness of deep learning algorithms in astronomy -- galaxy morphology studies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=%C4%86iprijanovi%C4%87%2C+A">A. 膯iprijanovi膰</a>, <a href="/search/cs?searchtype=author&query=Kafkes%2C+D">D. Kafkes</a>, <a href="/search/cs?searchtype=author&query=Perdue%2C+G+N">G. N. Perdue</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">K. Pedro</a>, <a href="/search/cs?searchtype=author&query=Snyder%2C+G">G. Snyder</a>, <a href="/search/cs?searchtype=author&query=S%C3%A1nchez%2C+F+J">F. J. S谩nchez</a>, <a href="/search/cs?searchtype=author&query=Madireddy%2C+S">S. Madireddy</a>, <a href="/search/cs?searchtype=author&query=Wild%2C+S+M">S. M. Wild</a>, <a href="/search/cs?searchtype=author&query=Nord%2C+B">B. Nord</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.00961v2-abstract-short" style="display: inline;"> Deep learning models are being increasingly adopted in wide array of scientific domains, especially to handle high-dimensionality and volume of the scientific data. However, these models tend to be brittle due to their complexity and overparametrization, especially to the inadvertent adversarial perturbations that can appear due to common image processing such as compression or blurring that are o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.00961v2-abstract-full').style.display = 'inline'; document.getElementById('2111.00961v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.00961v2-abstract-full" style="display: none;"> Deep learning models are being increasingly adopted in wide array of scientific domains, especially to handle high-dimensionality and volume of the scientific data. However, these models tend to be brittle due to their complexity and overparametrization, especially to the inadvertent adversarial perturbations that can appear due to common image processing such as compression or blurring that are often seen with real scientific data. It is crucial to understand this brittleness and develop models robust to these adversarial perturbations. To this end, we study the effect of observational noise from the exposure time, as well as the worst case scenario of a one-pixel attack as a proxy for compression or telescope errors on performance of ResNet18 trained to distinguish between galaxies of different morphologies in LSST mock data. We also explore how domain adaptation techniques can help improve model robustness in case of this type of naturally occurring attacks and help scientists build more trustworthy and stable models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.00961v2-abstract-full').style.display = 'none'; document.getElementById('2111.00961v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in: Fourth Workshop on Machine Learning and the Physical Sciences (35th Conference on Neural Information Processing Systems; NeurIPS2021); final version</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-CONF-21-561-SCD </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.05108">arXiv:2101.05108</a> <span> [<a href="https://arxiv.org/pdf/2101.05108">pdf</a>, <a href="https://arxiv.org/format/2101.05108">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1088/2632-2153/ac0ea1">10.1088/2632-2153/ac0ea1 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fast convolutional neural networks on FPGAs with hls4ml </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Aarrestad%2C+T">Thea Aarrestad</a>, <a href="/search/cs?searchtype=author&query=Loncar%2C+V">Vladimir Loncar</a>, <a href="/search/cs?searchtype=author&query=Ghielmetti%2C+N">Nicol貌 Ghielmetti</a>, <a href="/search/cs?searchtype=author&query=Pierini%2C+M">Maurizio Pierini</a>, <a href="/search/cs?searchtype=author&query=Summers%2C+S">Sioni Summers</a>, <a href="/search/cs?searchtype=author&query=Ngadiuba%2C+J">Jennifer Ngadiuba</a>, <a href="/search/cs?searchtype=author&query=Petersson%2C+C">Christoffer Petersson</a>, <a href="/search/cs?searchtype=author&query=Linander%2C+H">Hampus Linander</a>, <a href="/search/cs?searchtype=author&query=Iiyama%2C+Y">Yutaro Iiyama</a>, <a href="/search/cs?searchtype=author&query=Di+Guglielmo%2C+G">Giuseppe Di Guglielmo</a>, <a href="/search/cs?searchtype=author&query=Duarte%2C+J">Javier Duarte</a>, <a href="/search/cs?searchtype=author&query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&query=Rankin%2C+D">Dylan Rankin</a>, <a href="/search/cs?searchtype=author&query=Jindariani%2C+S">Sergo Jindariani</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+N">Nhan Tran</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+M">Mia Liu</a>, <a href="/search/cs?searchtype=author&query=Kreinar%2C+E">Edward Kreinar</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+Z">Zhenbin Wu</a>, <a href="/search/cs?searchtype=author&query=Hoang%2C+D">Duc Hoang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.05108v2-abstract-short" style="display: inline;"> We introduce an automated tool for deploying ultra low-latency, low-power deep neural networks with convolutional layers on FPGAs. By extending the hls4ml library, we demonstrate an inference latency of $5\,渭$s using convolutional architectures, targeting microsecond latency applications like those at the CERN Large Hadron Collider. Considering benchmark models trained on the Street View House Num… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.05108v2-abstract-full').style.display = 'inline'; document.getElementById('2101.05108v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.05108v2-abstract-full" style="display: none;"> We introduce an automated tool for deploying ultra low-latency, low-power deep neural networks with convolutional layers on FPGAs. By extending the hls4ml library, we demonstrate an inference latency of $5\,渭$s using convolutional architectures, targeting microsecond latency applications like those at the CERN Large Hadron Collider. Considering benchmark models trained on the Street View House Numbers Dataset, we demonstrate various methods for model compression in order to fit the computational constraints of a typical FPGA device used in trigger and data acquisition systems of particle detectors. In particular, we discuss pruning and quantization-aware training, and demonstrate how resource utilization can be significantly reduced with little to no loss in model accuracy. We show that the FPGA critical resource consumption can be reduced by 97% with zero loss in model accuracy, and by 99% when tolerating a 6% accuracy degradation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.05108v2-abstract-full').style.display = 'none'; document.getElementById('2101.05108v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 18 figures, 4 tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Mach. Learn.: Sci. Technol. 2 045015 (2021) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.08556">arXiv:2010.08556</a> <span> [<a href="https://arxiv.org/pdf/2010.08556">pdf</a>, <a href="https://arxiv.org/format/2010.08556">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Analysis, Statistics and Probability">physics.data-an</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/H2RC51942.2020.00010">10.1109/H2RC51942.2020.00010 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> FPGAs-as-a-Service Toolkit (FaaST) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Rankin%2C+D+S">Dylan Sheldon Rankin</a>, <a href="/search/cs?searchtype=author&query=Krupa%2C+J">Jeffrey Krupa</a>, <a href="/search/cs?searchtype=author&query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&query=Flechas%2C+M+A">Maria Acosta Flechas</a>, <a href="/search/cs?searchtype=author&query=Holzman%2C+B">Burt Holzman</a>, <a href="/search/cs?searchtype=author&query=Klijnsma%2C+T">Thomas Klijnsma</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+N">Nhan Tran</a>, <a href="/search/cs?searchtype=author&query=Hauck%2C+S">Scott Hauck</a>, <a href="/search/cs?searchtype=author&query=Hsu%2C+S">Shih-Chieh Hsu</a>, <a href="/search/cs?searchtype=author&query=Trahms%2C+M">Matthew Trahms</a>, <a href="/search/cs?searchtype=author&query=Lin%2C+K">Kelvin Lin</a>, <a href="/search/cs?searchtype=author&query=Lou%2C+Y">Yu Lou</a>, <a href="/search/cs?searchtype=author&query=Ho%2C+T">Ta-Wei Ho</a>, <a href="/search/cs?searchtype=author&query=Duarte%2C+J">Javier Duarte</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+M">Mia Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.08556v1-abstract-short" style="display: inline;"> Computing needs for high energy physics are already intensive and are expected to increase drastically in the coming years. In this context, heterogeneous computing, specifically as-a-service computing, has the potential for significant gains over traditional computing models. Although previous studies and packages in the field of heterogeneous computing have focused on GPUs as accelerators, FPGAs… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.08556v1-abstract-full').style.display = 'inline'; document.getElementById('2010.08556v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.08556v1-abstract-full" style="display: none;"> Computing needs for high energy physics are already intensive and are expected to increase drastically in the coming years. In this context, heterogeneous computing, specifically as-a-service computing, has the potential for significant gains over traditional computing models. Although previous studies and packages in the field of heterogeneous computing have focused on GPUs as accelerators, FPGAs are an extremely promising option as well. A series of workflows are developed to establish the performance capabilities of FPGAs as a service. Multiple different devices and a range of algorithms for use in high energy physics are studied. For a small, dense network, the throughput can be improved by an order of magnitude with respect to GPUs as a service. For large convolutional networks, the throughput is found to be comparable to GPUs as a service. This work represents the first open-source FPGAs-as-a-service toolkit. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.08556v1-abstract-full').style.display = 'none'; document.getElementById('2010.08556v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 7 figures, to appear in proceedings of the 2020 IEEE/ACM International Workshop on Heterogeneous High-performance Reconfigurable Computing</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-CONF-20-426-SCD </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 2020 IEEE/ACM International Workshop on Heterogeneous High-performance Reconfigurable Computing (H2RC), 2020, pp. 38-47 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.04509">arXiv:2009.04509</a> <span> [<a href="https://arxiv.org/pdf/2009.04509">pdf</a>, <a href="https://arxiv.org/format/2009.04509">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Analysis, Statistics and Probability">physics.data-an</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.3389/fdata.2020.604083">10.3389/fdata.2020.604083 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> GPU-accelerated machine learning inference as a service for computing in neutrino experiments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wang%2C+M">Michael Wang</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+T">Tingjun Yang</a>, <a href="/search/cs?searchtype=author&query=Flechas%2C+M+A">Maria Acosta Flechas</a>, <a href="/search/cs?searchtype=author&query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&query=Hawks%2C+B">Benjamin Hawks</a>, <a href="/search/cs?searchtype=author&query=Holzman%2C+B">Burt Holzman</a>, <a href="/search/cs?searchtype=author&query=Knoepfel%2C+K">Kyle Knoepfel</a>, <a href="/search/cs?searchtype=author&query=Krupa%2C+J">Jeffrey Krupa</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+N">Nhan Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.04509v2-abstract-short" style="display: inline;"> Machine learning algorithms are becoming increasingly prevalent and performant in the reconstruction of events in accelerator-based neutrino experiments. These sophisticated algorithms can be computationally expensive. At the same time, the data volumes of such experiments are rapidly increasing. The demand to process billions of neutrino events with many machine learning algorithm inferences crea… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.04509v2-abstract-full').style.display = 'inline'; document.getElementById('2009.04509v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.04509v2-abstract-full" style="display: none;"> Machine learning algorithms are becoming increasingly prevalent and performant in the reconstruction of events in accelerator-based neutrino experiments. These sophisticated algorithms can be computationally expensive. At the same time, the data volumes of such experiments are rapidly increasing. The demand to process billions of neutrino events with many machine learning algorithm inferences creates a computing challenge. We explore a computing model in which heterogeneous computing with GPU coprocessors is made available as a web service. The coprocessors can be efficiently and elastically deployed to provide the right amount of computing for a given processing task. With our approach, Services for Optimized Network Inference on Coprocessors (SONIC), we integrate GPU acceleration specifically for the ProtoDUNE-SP reconstruction chain without disrupting the native computing workflow. With our integrated framework, we accelerate the most time-consuming task, track and particle shower hit identification, by a factor of 17. This results in a factor of 2.7 reduction in the total processing time when compared with CPU-only production. For this particular task, only 1 GPU is required for every 68 CPU threads, providing a cost-effective solution. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.04509v2-abstract-full').style.display = 'none'; document.getElementById('2009.04509v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 7 figures, 2 tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-20-428-ND-SCD </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.12712">arXiv:2008.12712</a> <span> [<a href="https://arxiv.org/pdf/2008.12712">pdf</a>, <a href="https://arxiv.org/format/2008.12712">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1051/epjconf/202024506012">10.1051/epjconf/202024506012 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Coffea -- Columnar Object Framework For Effective Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Smith%2C+N">Nicholas Smith</a>, <a href="/search/cs?searchtype=author&query=Gray%2C+L">Lindsey Gray</a>, <a href="/search/cs?searchtype=author&query=Cremonesi%2C+M">Matteo Cremonesi</a>, <a href="/search/cs?searchtype=author&query=Jayatilaka%2C+B">Bo Jayatilaka</a>, <a href="/search/cs?searchtype=author&query=Gutsche%2C+O">Oliver Gutsche</a>, <a href="/search/cs?searchtype=author&query=Hall%2C+A">Allison Hall</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Acosta%2C+M">Maria Acosta</a>, <a href="/search/cs?searchtype=author&query=Melo%2C+A">Andrew Melo</a>, <a href="/search/cs?searchtype=author&query=Belforte%2C+S">Stefano Belforte</a>, <a href="/search/cs?searchtype=author&query=Pivarski%2C+J">Jim Pivarski</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.12712v2-abstract-short" style="display: inline;"> The coffea framework provides a new approach to High-Energy Physics analysis, via columnar operations, that improves time-to-insight, scalability, portability, and reproducibility of analysis. It is implemented with the Python programming language, the scientific python package ecosystem, and commodity big data technologies. To achieve this suite of improvements across many use cases, coffea takes… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.12712v2-abstract-full').style.display = 'inline'; document.getElementById('2008.12712v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.12712v2-abstract-full" style="display: none;"> The coffea framework provides a new approach to High-Energy Physics analysis, via columnar operations, that improves time-to-insight, scalability, portability, and reproducibility of analysis. It is implemented with the Python programming language, the scientific python package ecosystem, and commodity big data technologies. To achieve this suite of improvements across many use cases, coffea takes a factorized approach, separating the analysis implementation and data delivery scheme. All analysis operations are implemented using the NumPy or awkward-array packages which are wrapped to yield user code whose purpose is quickly intuited. Various data delivery schemes are wrapped into a common front-end which accepts user inputs and code, and returns user defined outputs. We will discuss our experience in implementing analysis of CMS data using the coffea framework along with a discussion of the user experience and future directions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.12712v2-abstract-full').style.display = 'none'; document.getElementById('2008.12712v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">As presented at CHEP 2019</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> EPJ Web of Conferences 245, 06012 (2020) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.03601">arXiv:2008.03601</a> <span> [<a href="https://arxiv.org/pdf/2008.03601">pdf</a>, <a href="https://arxiv.org/format/2008.03601">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.3389/fdata.2020.598927">10.3389/fdata.2020.598927 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Distance-Weighted Graph Neural Networks on FPGAs for Real-Time Particle Reconstruction in High Energy Physics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Iiyama%2C+Y">Yutaro Iiyama</a>, <a href="/search/cs?searchtype=author&query=Cerminara%2C+G">Gianluca Cerminara</a>, <a href="/search/cs?searchtype=author&query=Gupta%2C+A">Abhijay Gupta</a>, <a href="/search/cs?searchtype=author&query=Kieseler%2C+J">Jan Kieseler</a>, <a href="/search/cs?searchtype=author&query=Loncar%2C+V">Vladimir Loncar</a>, <a href="/search/cs?searchtype=author&query=Pierini%2C+M">Maurizio Pierini</a>, <a href="/search/cs?searchtype=author&query=Qasim%2C+S+R">Shah Rukh Qasim</a>, <a href="/search/cs?searchtype=author&query=Rieger%2C+M">Marcel Rieger</a>, <a href="/search/cs?searchtype=author&query=Summers%2C+S">Sioni Summers</a>, <a href="/search/cs?searchtype=author&query=Van+Onsem%2C+G">Gerrit Van Onsem</a>, <a href="/search/cs?searchtype=author&query=Wozniak%2C+K">Kinga Wozniak</a>, <a href="/search/cs?searchtype=author&query=Ngadiuba%2C+J">Jennifer Ngadiuba</a>, <a href="/search/cs?searchtype=author&query=Di+Guglielmo%2C+G">Giuseppe Di Guglielmo</a>, <a href="/search/cs?searchtype=author&query=Duarte%2C+J">Javier Duarte</a>, <a href="/search/cs?searchtype=author&query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&query=Rankin%2C+D">Dylan Rankin</a>, <a href="/search/cs?searchtype=author&query=Jindariani%2C+S">Sergo Jindariani</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+M">Mia Liu</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+N">Nhan Tran</a>, <a href="/search/cs?searchtype=author&query=Kreinar%2C+E">Edward Kreinar</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+Z">Zhenbin Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.03601v2-abstract-short" style="display: inline;"> Graph neural networks have been shown to achieve excellent performance for several crucial tasks in particle physics, such as charged particle tracking, jet tagging, and clustering. An important domain for the application of these networks is the FGPA-based first layer of real-time data filtering at the CERN Large Hadron Collider, which has strict latency and resource constraints. We discuss how t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.03601v2-abstract-full').style.display = 'inline'; document.getElementById('2008.03601v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.03601v2-abstract-full" style="display: none;"> Graph neural networks have been shown to achieve excellent performance for several crucial tasks in particle physics, such as charged particle tracking, jet tagging, and clustering. An important domain for the application of these networks is the FGPA-based first layer of real-time data filtering at the CERN Large Hadron Collider, which has strict latency and resource constraints. We discuss how to design distance-weighted graph networks that can be executed with a latency of less than 1$渭\mathrm{s}$ on an FPGA. To do so, we consider a representative task associated to particle reconstruction and identification in a next-generation calorimeter operating at a particle collider. We use a graph network architecture developed for such purposes, and apply additional simplifications to match the computing constraints of Level-1 trigger systems, including weight quantization. Using the $\mathtt{hls4ml}$ library, we convert the compressed models into firmware to be implemented on an FPGA. Performance of the synthesized models is presented both in terms of inference accuracy and resource usage. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.03601v2-abstract-full').style.display = 'none'; document.getElementById('2008.03601v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 4 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-20-405-E-SCD </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Frontiers in Big Data 3 (2021) 44 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2007.10359">arXiv:2007.10359</a> <span> [<a href="https://arxiv.org/pdf/2007.10359">pdf</a>, <a href="https://arxiv.org/format/2007.10359">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Analysis, Statistics and Probability">physics.data-an</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1088/2632-2153/abec21">10.1088/2632-2153/abec21 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> GPU coprocessors as a service for deep learning inference in high energy physics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Krupa%2C+J">Jeffrey Krupa</a>, <a href="/search/cs?searchtype=author&query=Lin%2C+K">Kelvin Lin</a>, <a href="/search/cs?searchtype=author&query=Flechas%2C+M+A">Maria Acosta Flechas</a>, <a href="/search/cs?searchtype=author&query=Dinsmore%2C+J">Jack Dinsmore</a>, <a href="/search/cs?searchtype=author&query=Duarte%2C+J">Javier Duarte</a>, <a href="/search/cs?searchtype=author&query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&query=Hauck%2C+S">Scott Hauck</a>, <a href="/search/cs?searchtype=author&query=Holzman%2C+B">Burt Holzman</a>, <a href="/search/cs?searchtype=author&query=Hsu%2C+S">Shih-Chieh Hsu</a>, <a href="/search/cs?searchtype=author&query=Klijnsma%2C+T">Thomas Klijnsma</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+M">Mia Liu</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Rankin%2C+D">Dylan Rankin</a>, <a href="/search/cs?searchtype=author&query=Suaysom%2C+N">Natchanon Suaysom</a>, <a href="/search/cs?searchtype=author&query=Trahms%2C+M">Matt Trahms</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+N">Nhan Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2007.10359v2-abstract-short" style="display: inline;"> In the next decade, the demands for computing in large scientific experiments are expected to grow tremendously. During the same time period, CPU performance increases will be limited. At the CERN Large Hadron Collider (LHC), these two issues will confront one another as the collider is upgraded for high luminosity running. Alternative processors such as graphics processing units (GPUs) can resolv… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.10359v2-abstract-full').style.display = 'inline'; document.getElementById('2007.10359v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2007.10359v2-abstract-full" style="display: none;"> In the next decade, the demands for computing in large scientific experiments are expected to grow tremendously. During the same time period, CPU performance increases will be limited. At the CERN Large Hadron Collider (LHC), these two issues will confront one another as the collider is upgraded for high luminosity running. Alternative processors such as graphics processing units (GPUs) can resolve this confrontation provided that algorithms can be sufficiently accelerated. In many cases, algorithmic speedups are found to be largest through the adoption of deep learning algorithms. We present a comprehensive exploration of the use of GPU-based hardware acceleration for deep learning inference within the data reconstruction workflow of high energy physics. We present several realistic examples and discuss a strategy for the seamless integration of coprocessors so that the LHC can maintain, if not exceed, its current performance throughout its running. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.10359v2-abstract-full').style.display = 'none'; document.getElementById('2007.10359v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">26 pages, 7 figures, 2 tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-20-338-E-SCD </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Mach. Learn.: Sci. Technol. 2 (2021) 035005 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.06308">arXiv:2003.06308</a> <span> [<a href="https://arxiv.org/pdf/2003.06308">pdf</a>, <a href="https://arxiv.org/format/2003.06308">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1088/2632-2153/aba042">10.1088/2632-2153/aba042 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Compressing deep neural networks on FPGAs to binary and ternary precision with HLS4ML </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Di+Guglielmo%2C+G">Giuseppe Di Guglielmo</a>, <a href="/search/cs?searchtype=author&query=Duarte%2C+J">Javier Duarte</a>, <a href="/search/cs?searchtype=author&query=Harris%2C+P">Philip Harris</a>, <a href="/search/cs?searchtype=author&query=Hoang%2C+D">Duc Hoang</a>, <a href="/search/cs?searchtype=author&query=Jindariani%2C+S">Sergo Jindariani</a>, <a href="/search/cs?searchtype=author&query=Kreinar%2C+E">Edward Kreinar</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+M">Mia Liu</a>, <a href="/search/cs?searchtype=author&query=Loncar%2C+V">Vladimir Loncar</a>, <a href="/search/cs?searchtype=author&query=Ngadiuba%2C+J">Jennifer Ngadiuba</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">Kevin Pedro</a>, <a href="/search/cs?searchtype=author&query=Pierini%2C+M">Maurizio Pierini</a>, <a href="/search/cs?searchtype=author&query=Rankin%2C+D">Dylan Rankin</a>, <a href="/search/cs?searchtype=author&query=Sagear%2C+S">Sheila Sagear</a>, <a href="/search/cs?searchtype=author&query=Summers%2C+S">Sioni Summers</a>, <a href="/search/cs?searchtype=author&query=Tran%2C+N">Nhan Tran</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+Z">Zhenbin Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.06308v2-abstract-short" style="display: inline;"> We present the implementation of binary and ternary neural networks in the hls4ml library, designed to automatically convert deep neural network models to digital circuits with FPGA firmware. Starting from benchmark models trained with floating point precision, we investigate different strategies to reduce the network's resource consumption by reducing the numerical precision of the network parame… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.06308v2-abstract-full').style.display = 'inline'; document.getElementById('2003.06308v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.06308v2-abstract-full" style="display: none;"> We present the implementation of binary and ternary neural networks in the hls4ml library, designed to automatically convert deep neural network models to digital circuits with FPGA firmware. Starting from benchmark models trained with floating point precision, we investigate different strategies to reduce the network's resource consumption by reducing the numerical precision of the network parameters to binary or ternary. We discuss the trade-off between model accuracy and resource consumption. In addition, we show how to balance between latency and accuracy by retaining full precision on a selected subset of network components. As an example, we consider two multiclass classification tasks: handwritten digit recognition with the MNIST data set and jet identification with simulated proton-proton collisions at the CERN Large Hadron Collider. The binary and ternary implementation has similar performance to the higher precision implementation while using drastically fewer FPGA resources. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.06308v2-abstract-full').style.display = 'none'; document.getElementById('2003.06308v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Update to MLST journal version</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-20-167-PPD-SCD </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Mach. Learn.: Sci. Technol. 2, 015001 (2020) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.05796">arXiv:1911.05796</a> <span> [<a href="https://arxiv.org/pdf/1911.05796">pdf</a>, <a href="https://arxiv.org/ps/1911.05796">ps</a>, <a href="https://arxiv.org/format/1911.05796">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> Response to NITRD, NCO, NSF Request for Information on "Update to the 2016 National Artificial Intelligence Research and Development Strategic Plan" </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Amundson%2C+J">J. Amundson</a>, <a href="/search/cs?searchtype=author&query=Annis%2C+J">J. Annis</a>, <a href="/search/cs?searchtype=author&query=Avestruz%2C+C">C. Avestruz</a>, <a href="/search/cs?searchtype=author&query=Bowring%2C+D">D. Bowring</a>, <a href="/search/cs?searchtype=author&query=Caldeira%2C+J">J. Caldeira</a>, <a href="/search/cs?searchtype=author&query=Cerati%2C+G">G. Cerati</a>, <a href="/search/cs?searchtype=author&query=Chang%2C+C">C. Chang</a>, <a href="/search/cs?searchtype=author&query=Dodelson%2C+S">S. Dodelson</a>, <a href="/search/cs?searchtype=author&query=Elvira%2C+D">D. Elvira</a>, <a href="/search/cs?searchtype=author&query=Farahi%2C+A">A. Farahi</a>, <a href="/search/cs?searchtype=author&query=Genser%2C+K">K. Genser</a>, <a href="/search/cs?searchtype=author&query=Gray%2C+L">L. Gray</a>, <a href="/search/cs?searchtype=author&query=Gutsche%2C+O">O. Gutsche</a>, <a href="/search/cs?searchtype=author&query=Harris%2C+P">P. Harris</a>, <a href="/search/cs?searchtype=author&query=Kinney%2C+J">J. Kinney</a>, <a href="/search/cs?searchtype=author&query=Kowalkowski%2C+J+B">J. B. Kowalkowski</a>, <a href="/search/cs?searchtype=author&query=Kutschke%2C+R">R. Kutschke</a>, <a href="/search/cs?searchtype=author&query=Mrenna%2C+S">S. Mrenna</a>, <a href="/search/cs?searchtype=author&query=Nord%2C+B">B. Nord</a>, <a href="/search/cs?searchtype=author&query=Para%2C+A">A. Para</a>, <a href="/search/cs?searchtype=author&query=Pedro%2C+K">K. Pedro</a>, <a href="/search/cs?searchtype=author&query=Perdue%2C+G+N">G. N. Perdue</a>, <a href="/search/cs?searchtype=author&query=Scheinker%2C+A">A. Scheinker</a>, <a href="/search/cs?searchtype=author&query=Spentzouris%2C+P">P. Spentzouris</a>, <a href="/search/cs?searchtype=author&query=John%2C+J+S">J. St. John</a> , et al. (5 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.05796v1-abstract-short" style="display: inline;"> We present a response to the 2018 Request for Information (RFI) from the NITRD, NCO, NSF regarding the "Update to the 2016 National Artificial Intelligence Research and Development Strategic Plan." Through this document, we provide a response to the question of whether and how the National Artificial Intelligence Research and Development Strategic Plan (NAIRDSP) should be updated from the perspect… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.05796v1-abstract-full').style.display = 'inline'; document.getElementById('1911.05796v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.05796v1-abstract-full" style="display: none;"> We present a response to the 2018 Request for Information (RFI) from the NITRD, NCO, NSF regarding the "Update to the 2016 National Artificial Intelligence Research and Development Strategic Plan." Through this document, we provide a response to the question of whether and how the National Artificial Intelligence Research and Development Strategic Plan (NAIRDSP) should be updated from the perspective of Fermilab, America's premier national laboratory for High Energy Physics (HEP). We believe the NAIRDSP should be extended in light of the rapid pace of development and innovation in the field of Artificial Intelligence (AI) since 2016, and present our recommendations below. AI has profoundly impacted many areas of human life, promising to dramatically reshape society --- e.g., economy, education, science --- in the coming years. We are still early in this process. It is critical to invest now in this technology to ensure it is safe and deployed ethically. Science and society both have a strong need for accuracy, efficiency, transparency, and accountability in algorithms, making investments in scientific AI particularly valuable. Thus far the US has been a leader in AI technologies, and we believe as a national Laboratory it is crucial to help maintain and extend this leadership. Moreover, investments in AI will be important for maintaining US leadership in the physical sciences. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.05796v1-abstract-full').style.display = 'none'; document.getElementById('1911.05796v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-FN-1092-SCD </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>