CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;14 of 14 results for author: <span class="mathjax">Walmsley, M</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Walmsley%2C+M">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Walmsley, M"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Walmsley%2C+M&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Walmsley, M"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.14048">arXiv:2501.14048</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.14048">pdf</a>, <a href="https://arxiv.org/format/2501.14048">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SIDDA: SInkhorn Dynamic Domain Adaptation for Image Classification with Equivariant Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pandya%2C+S">Sneh Pandya</a>, <a href="/search/cs?searchtype=author&amp;query=Patel%2C+P">Purvik Patel</a>, <a href="/search/cs?searchtype=author&amp;query=Nord%2C+B+D">Brian D. Nord</a>, <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=%C4%86iprijanovi%C4%87%2C+A">Aleksandra 膯iprijanovi膰</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.14048v1-abstract-short" style="display: inline;"> Modern neural networks (NNs) often do not generalize well in the presence of a &#34;covariate shift&#34;; that is, in situations where the training and test data distributions differ, but the conditional distribution of classification labels remains unchanged. In such cases, NN generalization can be reduced to a problem of learning more domain-invariant features. Domain adaptation (DA) methods include a r&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.14048v1-abstract-full').style.display = 'inline'; document.getElementById('2501.14048v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.14048v1-abstract-full" style="display: none;"> Modern neural networks (NNs) often do not generalize well in the presence of a &#34;covariate shift&#34;; that is, in situations where the training and test data distributions differ, but the conditional distribution of classification labels remains unchanged. In such cases, NN generalization can be reduced to a problem of learning more domain-invariant features. Domain adaptation (DA) methods include a range of techniques aimed at achieving this; however, these methods have struggled with the need for extensive hyperparameter tuning, which then incurs significant computational costs. In this work, we introduce SIDDA, an out-of-the-box DA training algorithm built upon the Sinkhorn divergence, that can achieve effective domain alignment with minimal hyperparameter tuning and computational overhead. We demonstrate the efficacy of our method on multiple simulated and real datasets of varying complexity, including simple shapes, handwritten digits, and real astronomical observations. SIDDA is compatible with a variety of NN architectures, and it works particularly well in improving classification accuracy and model calibration when paired with equivariant neural networks (ENNs). We find that SIDDA enhances the generalization capabilities of NNs, achieving up to a $\approx40\%$ improvement in classification accuracy on unlabeled target data. We also study the efficacy of DA on ENNs with respect to the varying group orders of the dihedral group $D_N$, and find that the model performance improves as the degree of equivariance increases. Finally, we find that SIDDA enhances model calibration on both source and target data--achieving over an order of magnitude improvement in the ECE and Brier score. SIDDA&#39;s versatility, combined with its automated approach to domain alignment, has the potential to advance multi-dataset studies by enabling the development of highly generalizable models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.14048v1-abstract-full').style.display = 'none'; document.getElementById('2501.14048v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages, 5 figures, 4 tables. code available at: https://github.com/deepskies/SIDDA</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> FERMILAB-PUB-25-0031-CSAID </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.01556">arXiv:2408.01556</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.01556">pdf</a>, <a href="https://arxiv.org/format/2408.01556">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Digital Libraries">cs.DL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.3847/1538-4365/ad7c43">10.3847/1538-4365/ad7c43 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> pathfinder: A Semantic Framework for Literature Review and Knowledge Discovery in Astronomy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iyer%2C+K+G">Kartheik G. Iyer</a>, <a href="/search/cs?searchtype=author&amp;query=Yunus%2C+M">Mikaeel Yunus</a>, <a href="/search/cs?searchtype=author&amp;query=O%27Neill%2C+C">Charles O&#39;Neill</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+C">Christine Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Hyk%2C+A">Alina Hyk</a>, <a href="/search/cs?searchtype=author&amp;query=McCormick%2C+K">Kiera McCormick</a>, <a href="/search/cs?searchtype=author&amp;query=Ciuca%2C+I">Ioana Ciuca</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+J+F">John F. Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Accomazzi%2C+A">Alberto Accomazzi</a>, <a href="/search/cs?searchtype=author&amp;query=Astarita%2C+S">Simone Astarita</a>, <a href="/search/cs?searchtype=author&amp;query=Chakrabarty%2C+R">Rishabh Chakrabarty</a>, <a href="/search/cs?searchtype=author&amp;query=Cranney%2C+J">Jesse Cranney</a>, <a href="/search/cs?searchtype=author&amp;query=Field%2C+A">Anjalie Field</a>, <a href="/search/cs?searchtype=author&amp;query=Ghosal%2C+T">Tirthankar Ghosal</a>, <a href="/search/cs?searchtype=author&amp;query=Ginolfi%2C+M">Michele Ginolfi</a>, <a href="/search/cs?searchtype=author&amp;query=Huertas-Company%2C+M">Marc Huertas-Company</a>, <a href="/search/cs?searchtype=author&amp;query=Jablonska%2C+M">Maja Jablonska</a>, <a href="/search/cs?searchtype=author&amp;query=Kruk%2C+S">Sandor Kruk</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Huiling Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Marchidan%2C+G">Gabriel Marchidan</a>, <a href="/search/cs?searchtype=author&amp;query=Mistry%2C+R">Rohit Mistry</a>, <a href="/search/cs?searchtype=author&amp;query=Naiman%2C+J+P">J. P. Naiman</a>, <a href="/search/cs?searchtype=author&amp;query=Peek%2C+J+E+G">J. E. G. Peek</a>, <a href="/search/cs?searchtype=author&amp;query=Polimera%2C+M">Mugdha Polimera</a>, <a href="/search/cs?searchtype=author&amp;query=Rodriguez%2C+S+J">Sergio J. Rodriguez</a> , et al. (5 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.01556v1-abstract-short" style="display: inline;"> The exponential growth of astronomical literature poses significant challenges for researchers navigating and synthesizing general insights or even domain-specific knowledge. We present Pathfinder, a machine learning framework designed to enable literature review and knowledge discovery in astronomy, focusing on semantic searching with natural language instead of syntactic searches with keywords.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.01556v1-abstract-full').style.display = 'inline'; document.getElementById('2408.01556v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.01556v1-abstract-full" style="display: none;"> The exponential growth of astronomical literature poses significant challenges for researchers navigating and synthesizing general insights or even domain-specific knowledge. We present Pathfinder, a machine learning framework designed to enable literature review and knowledge discovery in astronomy, focusing on semantic searching with natural language instead of syntactic searches with keywords. Utilizing state-of-the-art large language models (LLMs) and a corpus of 350,000 peer-reviewed papers from the Astrophysics Data System (ADS), Pathfinder offers an innovative approach to scientific inquiry and literature exploration. Our framework couples advanced retrieval techniques with LLM-based synthesis to search astronomical literature by semantic context as a complement to currently existing methods that use keywords or citation graphs. It addresses complexities of jargon, named entities, and temporal aspects through time-based and citation-based weighting schemes. We demonstrate the tool&#39;s versatility through case studies, showcasing its application in various research scenarios. The system&#39;s performance is evaluated using custom benchmarks, including single-paper and multi-paper tasks. Beyond literature review, Pathfinder offers unique capabilities for reformatting answers in ways that are accessible to various audiences (e.g. in a different language or as simplified text), visualizing research landscapes, and tracking the impact of observatories and methodologies. This tool represents a significant advancement in applying AI to astronomical research, aiding researchers at all career stages in navigating modern astronomy literature. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.01556v1-abstract-full').style.display = 'none'; document.getElementById('2408.01556v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages, 9 figures, submitted to AAS jorunals. Comments are welcome, and the tools mentioned are available online at https://pfdr.app</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.02973">arXiv:2404.02973</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.02973">pdf</a>, <a href="https://arxiv.org/format/2404.02973">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> </div> </div> <p class="title is-5 mathjax"> Scaling Laws for Galaxy Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Bowles%2C+M">Micah Bowles</a>, <a href="/search/cs?searchtype=author&amp;query=Scaife%2C+A+M+M">Anna M. M. Scaife</a>, <a href="/search/cs?searchtype=author&amp;query=Makechemu%2C+J+S">Jason Shingirai Makechemu</a>, <a href="/search/cs?searchtype=author&amp;query=Gordon%2C+A+J">Alexander J. Gordon</a>, <a href="/search/cs?searchtype=author&amp;query=Ferguson%2C+A+M+N">Annette M. N. Ferguson</a>, <a href="/search/cs?searchtype=author&amp;query=Mann%2C+R+G">Robert G. Mann</a>, <a href="/search/cs?searchtype=author&amp;query=Pearson%2C+J">James Pearson</a>, <a href="/search/cs?searchtype=author&amp;query=Popp%2C+J+J">J眉rgen J. Popp</a>, <a href="/search/cs?searchtype=author&amp;query=Bovy%2C+J">Jo Bovy</a>, <a href="/search/cs?searchtype=author&amp;query=Speagle%2C+J">Josh Speagle</a>, <a href="/search/cs?searchtype=author&amp;query=Dickinson%2C+H">Hugh Dickinson</a>, <a href="/search/cs?searchtype=author&amp;query=Fortson%2C+L">Lucy Fortson</a>, <a href="/search/cs?searchtype=author&amp;query=G%C3%A9ron%2C+T">Tobias G茅ron</a>, <a href="/search/cs?searchtype=author&amp;query=Kruk%2C+S">Sandor Kruk</a>, <a href="/search/cs?searchtype=author&amp;query=Lintott%2C+C+J">Chris J. Lintott</a>, <a href="/search/cs?searchtype=author&amp;query=Mantha%2C+K">Kameswara Mantha</a>, <a href="/search/cs?searchtype=author&amp;query=Mohan%2C+D">Devina Mohan</a>, <a href="/search/cs?searchtype=author&amp;query=O%27Ryan%2C+D">David O&#39;Ryan</a>, <a href="/search/cs?searchtype=author&amp;query=Slijepevic%2C+I+V">Inigo V. Slijepevic</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.02973v1-abstract-short" style="display: inline;"> We present the first systematic investigation of supervised scaling laws outside of an ImageNet-like context - on images of galaxies. We use 840k galaxy images and over 100M annotations by Galaxy Zoo volunteers, comparable in scale to Imagenet-1K. We find that adding annotated galaxy images provides a power law improvement in performance across all architectures and all tasks, while adding trainab&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.02973v1-abstract-full').style.display = 'inline'; document.getElementById('2404.02973v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.02973v1-abstract-full" style="display: none;"> We present the first systematic investigation of supervised scaling laws outside of an ImageNet-like context - on images of galaxies. We use 840k galaxy images and over 100M annotations by Galaxy Zoo volunteers, comparable in scale to Imagenet-1K. We find that adding annotated galaxy images provides a power law improvement in performance across all architectures and all tasks, while adding trainable parameters is effective only for some (typically more subjectively challenging) tasks. We then compare the downstream performance of finetuned models pretrained on either ImageNet-12k alone vs. additionally pretrained on our galaxy images. We achieve an average relative error rate reduction of 31% across 5 downstream tasks of scientific interest. Our finetuned models are more label-efficient and, unlike their ImageNet-12k-pretrained equivalents, often achieve linear transfer performance equal to that of end-to-end finetuning. We find relatively modest additional downstream benefits from scaling model size, implying that scaling alone is not sufficient to address our domain gap, and suggest that practitioners with qualitatively different images might benefit more from in-domain adaption followed by targeted downstream labelling. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.02973v1-abstract-full').style.display = 'none'; document.getElementById('2404.02973v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10+6 pages, 12 figures. Appendix C2 based on arxiv:2206.11927. Code, demos, documentation at https://github.com/mwalmsley/zoobot</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.02910">arXiv:2312.02910</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.02910">pdf</a>, <a href="https://arxiv.org/format/2312.02910">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Rare Galaxy Classes Identified In Foundation Model Representations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Scaife%2C+A+M+M">Anna M. M. Scaife</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.02910v1-abstract-short" style="display: inline;"> We identify rare and visually distinctive galaxy populations by searching for structure within the learned representations of pretrained models. We show that these representations arrange galaxies by appearance in patterns beyond those needed to predict the pretraining labels. We design a clustering approach to isolate specific local patterns, revealing groups of galaxies with rare and scientifica&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02910v1-abstract-full').style.display = 'inline'; document.getElementById('2312.02910v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.02910v1-abstract-full" style="display: none;"> We identify rare and visually distinctive galaxy populations by searching for structure within the learned representations of pretrained models. We show that these representations arrange galaxies by appearance in patterns beyond those needed to predict the pretraining labels. We design a clustering approach to isolate specific local patterns, revealing groups of galaxies with rare and scientifically-interesting morphologies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02910v1-abstract-full').style.display = 'none'; document.getElementById('2312.02910v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at Machine Learning and the Physical Sciences Workshop, NeurIPS 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.02908">arXiv:2312.02908</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.02908">pdf</a>, <a href="https://arxiv.org/format/2312.02908">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning Segmentation of Spiral Arms and Bars </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Spindler%2C+A">Ashley Spindler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.02908v1-abstract-short" style="display: inline;"> We present the first deep learning model for segmenting galactic spiral arms and bars. In a blinded assessment by expert astronomers, our predicted spiral arm masks are preferred over both current automated methods (99% of evaluations) and our original volunteer labels (79% of evaluations). Experts rated our spiral arm masks as `mostly good&#39; to `perfect&#39; in 89% of evaluations. Bar lengths triviall&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02908v1-abstract-full').style.display = 'inline'; document.getElementById('2312.02908v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.02908v1-abstract-full" style="display: none;"> We present the first deep learning model for segmenting galactic spiral arms and bars. In a blinded assessment by expert astronomers, our predicted spiral arm masks are preferred over both current automated methods (99% of evaluations) and our original volunteer labels (79% of evaluations). Experts rated our spiral arm masks as `mostly good&#39; to `perfect&#39; in 89% of evaluations. Bar lengths trivially derived from our predicted bar masks are in excellent agreement with a dedicated crowdsourcing project. The pixelwise precision of our masks, previously impossible at scale, will underpin new research into how spiral arms and bars evolve. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02908v1-abstract-full').style.display = 'none'; document.getElementById('2312.02908v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at Machine Learning and the Physical Sciences Workshop, NeurIPS 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.12528">arXiv:2310.12528</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.12528">pdf</a>, <a href="https://arxiv.org/format/2310.12528">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Constructing Impactful Machine Learning Research for Astronomy: Best Practices for Researchers and Reviewers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huppenkothen%2C+D">D. Huppenkothen</a>, <a href="/search/cs?searchtype=author&amp;query=Ntampaka%2C+M">M. Ntampaka</a>, <a href="/search/cs?searchtype=author&amp;query=Ho%2C+M">M. Ho</a>, <a href="/search/cs?searchtype=author&amp;query=Fouesneau%2C+M">M. Fouesneau</a>, <a href="/search/cs?searchtype=author&amp;query=Nord%2C+B">B. Nord</a>, <a href="/search/cs?searchtype=author&amp;query=Peek%2C+J+E+G">J. E. G. Peek</a>, <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">M. Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+J+F">J. F. Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Avestruz%2C+C">C. Avestruz</a>, <a href="/search/cs?searchtype=author&amp;query=Buck%2C+T">T. Buck</a>, <a href="/search/cs?searchtype=author&amp;query=Brescia%2C+M">M. Brescia</a>, <a href="/search/cs?searchtype=author&amp;query=Finkbeiner%2C+D+P">D. P. Finkbeiner</a>, <a href="/search/cs?searchtype=author&amp;query=Goulding%2C+A+D">A. D. Goulding</a>, <a href="/search/cs?searchtype=author&amp;query=Kacprzak%2C+T">T. Kacprzak</a>, <a href="/search/cs?searchtype=author&amp;query=Melchior%2C+P">P. Melchior</a>, <a href="/search/cs?searchtype=author&amp;query=Pasquato%2C+M">M. Pasquato</a>, <a href="/search/cs?searchtype=author&amp;query=Ramachandra%2C+N">N. Ramachandra</a>, <a href="/search/cs?searchtype=author&amp;query=Ting%2C+Y+-">Y. -S. Ting</a>, <a href="/search/cs?searchtype=author&amp;query=van+de+Ven%2C+G">G. van de Ven</a>, <a href="/search/cs?searchtype=author&amp;query=Villar%2C+S">S. Villar</a>, <a href="/search/cs?searchtype=author&amp;query=Villar%2C+V+A">V. A. Villar</a>, <a href="/search/cs?searchtype=author&amp;query=Zinger%2C+E">E. Zinger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.12528v1-abstract-short" style="display: inline;"> Machine learning has rapidly become a tool of choice for the astronomical community. It is being applied across a wide range of wavelengths and problems, from the classification of transients to neural network emulators of cosmological simulations, and is shifting paradigms about how we generate and report scientific results. At the same time, this class of method comes with its own set of best pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.12528v1-abstract-full').style.display = 'inline'; document.getElementById('2310.12528v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.12528v1-abstract-full" style="display: none;"> Machine learning has rapidly become a tool of choice for the astronomical community. It is being applied across a wide range of wavelengths and problems, from the classification of transients to neural network emulators of cosmological simulations, and is shifting paradigms about how we generate and report scientific results. At the same time, this class of method comes with its own set of best practices, challenges, and drawbacks, which, at present, are often reported on incompletely in the astrophysical literature. With this paper, we aim to provide a primer to the astronomical community, including authors, reviewers, and editors, on how to implement machine learning models and report their results in a way that ensures the accuracy of the results, reproducibility of the findings, and usefulness of the method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.12528v1-abstract-full').style.display = 'none'; document.getElementById('2310.12528v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 3 figures; submitted to the Bulletin of the American Astronomical Society</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.14760">arXiv:2210.14760</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.14760">pdf</a>, <a href="https://arxiv.org/format/2210.14760">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> A New Task: Deriving Semantic Class Targets for the Physical Sciences </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bowles%2C+M">Micah Bowles</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+H">Hongming Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Vardoulaki%2C+E">Eleni Vardoulaki</a>, <a href="/search/cs?searchtype=author&amp;query=Alexander%2C+E+L">Emma L. Alexander</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+Y">Yan Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Rudnick%2C+L">Lawrence Rudnick</a>, <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Porter%2C+F">Fiona Porter</a>, <a href="/search/cs?searchtype=author&amp;query=Scaife%2C+A+M+M">Anna M. M. Scaife</a>, <a href="/search/cs?searchtype=author&amp;query=Slijepcevic%2C+I+V">Inigo Val Slijepcevic</a>, <a href="/search/cs?searchtype=author&amp;query=Segal%2C+G">Gary Segal</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.14760v2-abstract-short" style="display: inline;"> We define deriving semantic class targets as a novel multi-modal task. By doing so, we aim to improve classification schemes in the physical sciences which can be severely abstracted and obfuscating. We address this task for upcoming radio astronomy surveys and present the derived semantic radio galaxy morphology class targets. </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.14760v2-abstract-full" style="display: none;"> We define deriving semantic class targets as a novel multi-modal task. By doing so, we aim to improve classification schemes in the physical sciences which can be severely abstracted and obfuscating. We address this task for upcoming radio astronomy surveys and present the derived semantic radio galaxy morphology class targets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.14760v2-abstract-full').style.display = 'none'; document.getElementById('2210.14760v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 1 figure, Accepted at Fifth Workshop on Machine Learning and the Physical Sciences (NeurIPS 2022), Neural Information Processing Systems 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.11927">arXiv:2206.11927</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.11927">pdf</a>, <a href="https://arxiv.org/format/2206.11927">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> </div> </div> <p class="title is-5 mathjax"> Towards Galaxy Foundation Models with Hybrid Contrastive Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Slijepcevic%2C+I+V">Inigo Val Slijepcevic</a>, <a href="/search/cs?searchtype=author&amp;query=Bowles%2C+M">Micah Bowles</a>, <a href="/search/cs?searchtype=author&amp;query=Scaife%2C+A+M+M">Anna M. M. Scaife</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.11927v1-abstract-short" style="display: inline;"> New astronomical tasks are often related to earlier tasks for which labels have already been collected. We adapt the contrastive framework BYOL to leverage those labels as a pretraining task while also enforcing augmentation invariance. For large-scale pretraining, we introduce GZ-Evo v0.1, a set of 96.5M volunteer responses for 552k galaxy images plus a further 1.34M comparable unlabelled galaxie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.11927v1-abstract-full').style.display = 'inline'; document.getElementById('2206.11927v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.11927v1-abstract-full" style="display: none;"> New astronomical tasks are often related to earlier tasks for which labels have already been collected. We adapt the contrastive framework BYOL to leverage those labels as a pretraining task while also enforcing augmentation invariance. For large-scale pretraining, we introduce GZ-Evo v0.1, a set of 96.5M volunteer responses for 552k galaxy images plus a further 1.34M comparable unlabelled galaxies. Most of the 206 GZ-Evo answers are unknown for any given galaxy, and so our pretraining task uses a Dirichlet loss that naturally handles unknown answers. GZ-Evo pretraining, with or without hybrid learning, improves on direct training even with plentiful downstream labels (+4% accuracy with 44k labels). Our hybrid pretraining/contrastive method further improves downstream accuracy vs. pretraining or contrastive learning, especially in the low-label transfer regime (+6% accuracy with 750 labels). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.11927v1-abstract-full').style.display = 'none'; document.getElementById('2206.11927v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at the ICML 2022 Workshop on Machine Learning for Astrophysics. Data: www.github.com/mwalmsley/pytorch-galaxy-datasets. Please reach out to share your labelled data - all contributions will be credited in future work</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.08816">arXiv:2204.08816</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2204.08816">pdf</a>, <a href="https://arxiv.org/format/2204.08816">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/mnras/stac1135">10.1093/mnras/stac1135 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Radio Galaxy Zoo: Using semi-supervised learning to leverage large unlabelled data-sets for radio galaxy classification under data-set shift </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Slijepcevic%2C+I+V">Inigo V. Slijepcevic</a>, <a href="/search/cs?searchtype=author&amp;query=Scaife%2C+A+M+M">Anna M. M. Scaife</a>, <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Bowles%2C+M">Micah Bowles</a>, <a href="/search/cs?searchtype=author&amp;query=Wong%2C+I">Ivy Wong</a>, <a href="/search/cs?searchtype=author&amp;query=Shabala%2C+S+S">Stanislav S. Shabala</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+H">Hongming Tang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.08816v4-abstract-short" style="display: inline;"> In this work we examine the classification accuracy and robustness of a state-of-the-art semi-supervised learning (SSL) algorithm applied to the morphological classification of radio galaxies. We test if SSL with fewer labels can achieve test accuracies comparable to the supervised state-of-the-art and whether this holds when incorporating previously unseen data. We find that for the radio galaxy&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.08816v4-abstract-full').style.display = 'inline'; document.getElementById('2204.08816v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.08816v4-abstract-full" style="display: none;"> In this work we examine the classification accuracy and robustness of a state-of-the-art semi-supervised learning (SSL) algorithm applied to the morphological classification of radio galaxies. We test if SSL with fewer labels can achieve test accuracies comparable to the supervised state-of-the-art and whether this holds when incorporating previously unseen data. We find that for the radio galaxy classification problem considered, SSL provides additional regularisation and outperforms the baseline test accuracy. However, in contrast to model performance metrics reported on computer science benchmarking data-sets, we find that improvement is limited to a narrow range of label volumes, with performance falling off rapidly at low label volumes. Additionally, we show that SSL does not improve model calibration, regardless of whether classification is improved. Moreover, we find that when different underlying catalogues drawn from the same radio survey are used to provide the labelled and unlabelled data-sets required for SSL, a significant drop in classification performance is observered, highlighting the difficulty of applying SSL techniques under dataset shift. We show that a class-imbalanced unlabelled data pool negatively affects performance through prior probability shift, which we suggest may explain this performance drop, and that using the Frechet Distance between labelled and unlabelled data-sets as a measure of data-set shift can provide a prediction of model performance, but that for typical radio galaxy data-sets with labelled sample volumes of O(1000), the sample variance associated with this technique is high and the technique is in general not sufficiently robust to replace a train-test cycle. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.08816v4-abstract-full').style.display = 'none'; document.getElementById('2204.08816v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to MNRAS. 14 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.01203">arXiv:2201.01203</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.01203">pdf</a>, <a href="https://arxiv.org/format/2201.01203">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cosmology and Nongalactic Astrophysics">astro-ph.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/mnras/stac223">10.1093/mnras/stac223 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Quantifying Uncertainty in Deep Learning Approaches to Radio Galaxy Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mohan%2C+D">Devina Mohan</a>, <a href="/search/cs?searchtype=author&amp;query=Scaife%2C+A+M+M">Anna M. M. Scaife</a>, <a href="/search/cs?searchtype=author&amp;query=Porter%2C+F">Fiona Porter</a>, <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Bowles%2C+M">Micah Bowles</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.01203v2-abstract-short" style="display: inline;"> In this work we use variational inference to quantify the degree of uncertainty in deep learning model predictions of radio galaxy classification. We show that the level of model posterior variance for individual test samples is correlated with human uncertainty when labelling radio galaxies. We explore the model performance and uncertainty calibration for different weight priors and suggest that&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.01203v2-abstract-full').style.display = 'inline'; document.getElementById('2201.01203v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.01203v2-abstract-full" style="display: none;"> In this work we use variational inference to quantify the degree of uncertainty in deep learning model predictions of radio galaxy classification. We show that the level of model posterior variance for individual test samples is correlated with human uncertainty when labelling radio galaxies. We explore the model performance and uncertainty calibration for different weight priors and suggest that a sparse prior produces more well-calibrated uncertainty estimates. Using the posterior distributions for individual weights, we demonstrate that we can prune 30% of the fully-connected layer weights without significant loss of performance by removing the weights with the lowest signal-to-noise ratio. A larger degree of pruning can be achieved using a Fisher information based ranking, but both pruning methods affect the uncertainty calibration for Fanaroff-Riley type I and type II radio galaxies differently. Like other work in this field, we experience a cold posterior effect, whereby the posterior must be down-weighted to achieve good predictive performance. We examine whether adapting the cost function to accommodate model misspecification can compensate for this effect, but find that it does not make a significant difference. We also examine the effect of principled data augmentation and find that this improves upon the baseline but also does not compensate for the observed effect. We interpret this as the cold posterior effect being due to the overly effective curation of our training sample leading to likelihood misspecification, and raise this as a potential issue for Bayesian deep learning approaches to radio galaxy classification in future. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.01203v2-abstract-full').style.display = 'none'; document.getElementById('2201.01203v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted by MNRAS</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.12735">arXiv:2110.12735</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.12735">pdf</a>, <a href="https://arxiv.org/format/2110.12735">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/mnras/stac525">10.1093/mnras/stac525 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Practical Galaxy Morphology Tools from Deep Supervised Representation Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Scaife%2C+A+M+M">Anna M. M. Scaife</a>, <a href="/search/cs?searchtype=author&amp;query=Lintott%2C+C">Chris Lintott</a>, <a href="/search/cs?searchtype=author&amp;query=Lochner%2C+M">Michelle Lochner</a>, <a href="/search/cs?searchtype=author&amp;query=Etsebeth%2C+V">Verlon Etsebeth</a>, <a href="/search/cs?searchtype=author&amp;query=G%C3%A9ron%2C+T">Tobias G茅ron</a>, <a href="/search/cs?searchtype=author&amp;query=Dickinson%2C+H">Hugh Dickinson</a>, <a href="/search/cs?searchtype=author&amp;query=Fortson%2C+L">Lucy Fortson</a>, <a href="/search/cs?searchtype=author&amp;query=Kruk%2C+S">Sandor Kruk</a>, <a href="/search/cs?searchtype=author&amp;query=Masters%2C+K+L">Karen L. Masters</a>, <a href="/search/cs?searchtype=author&amp;query=Mantha%2C+K+B">Kameswara Bharadwaj Mantha</a>, <a href="/search/cs?searchtype=author&amp;query=Simmons%2C+B+D">Brooke D. Simmons</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.12735v2-abstract-short" style="display: inline;"> Astronomers have typically set out to solve supervised machine learning problems by creating their own representations from scratch. We show that deep learning models trained to answer every Galaxy Zoo DECaLS question learn meaningful semantic representations of galaxies that are useful for new tasks on which the models were never trained. We exploit these representations to outperform several rec&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.12735v2-abstract-full').style.display = 'inline'; document.getElementById('2110.12735v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.12735v2-abstract-full" style="display: none;"> Astronomers have typically set out to solve supervised machine learning problems by creating their own representations from scratch. We show that deep learning models trained to answer every Galaxy Zoo DECaLS question learn meaningful semantic representations of galaxies that are useful for new tasks on which the models were never trained. We exploit these representations to outperform several recent approaches at practical tasks crucial for investigating large galaxy samples. The first task is identifying galaxies of similar morphology to a query galaxy. Given a single galaxy assigned a free text tag by humans (e.g. &#34;#diffuse&#34;), we can find galaxies matching that tag for most tags. The second task is identifying the most interesting anomalies to a particular researcher. Our approach is 100% accurate at identifying the most interesting 100 anomalies (as judged by Galaxy Zoo 2 volunteers). The third task is adapting a model to solve a new task using only a small number of newly-labelled galaxies. Models fine-tuned from our representation are better able to identify ring galaxies than models fine-tuned from terrestrial images (ImageNet) or trained from scratch. We solve each task with very few new labels; either one (for the similarity search) or several hundred (for anomaly detection or fine-tuning). This challenges the longstanding view that deep supervised methods require new large labelled datasets for practical use in astronomy. To help the community benefit from our pretrained models, we release our fine-tuning code Zoobot. Zoobot is accessible to researchers with no prior experience in deep learning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.12735v2-abstract-full').style.display = 'none'; document.getElementById('2110.12735v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages plus appendix. Accepted to MNRAS (open-access DOI below). Code, documentation, pretrained models: https://github.com/mwalmsley/zoobot (PyTorch and TensorFlow)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> MNRAS Volume 513, Issue 2, June 2022, Pages 1581-1599 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.14961">arXiv:2104.14961</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2104.14961">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Revisiting Citizen Science Through the Lens of Hybrid Intelligence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Rafner%2C+J">Janet Rafner</a>, <a href="/search/cs?searchtype=author&amp;query=Gajdacz%2C+M">Miroslav Gajdacz</a>, <a href="/search/cs?searchtype=author&amp;query=Kragh%2C+G">Gitte Kragh</a>, <a href="/search/cs?searchtype=author&amp;query=Hjorth%2C+A">Arthur Hjorth</a>, <a href="/search/cs?searchtype=author&amp;query=Gander%2C+A">Anna Gander</a>, <a href="/search/cs?searchtype=author&amp;query=Palfi%2C+B">Blanka Palfi</a>, <a href="/search/cs?searchtype=author&amp;query=Berditchevskaia%2C+A">Aleks Berditchevskaia</a>, <a href="/search/cs?searchtype=author&amp;query=Grey%2C+F">Fran莽ois Grey</a>, <a href="/search/cs?searchtype=author&amp;query=Gal%2C+K">Kobi Gal</a>, <a href="/search/cs?searchtype=author&amp;query=Segal%2C+A">Avi Segal</a>, <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Miller%2C+J+A">Josh Aaron Miller</a>, <a href="/search/cs?searchtype=author&amp;query=Dellerman%2C+D">Dominik Dellerman</a>, <a href="/search/cs?searchtype=author&amp;query=Haklay%2C+M">Muki Haklay</a>, <a href="/search/cs?searchtype=author&amp;query=Michelucci%2C+P">Pietro Michelucci</a>, <a href="/search/cs?searchtype=author&amp;query=Sherson%2C+J">Jacob Sherson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2104.14961v1-abstract-short" style="display: inline;"> Artificial Intelligence (AI) can augment and sometimes even replace human cognition. Inspired by efforts to value human agency alongside productivity, we discuss the benefits of solving Citizen Science (CS) tasks with Hybrid Intelligence (HI), a synergetic mixture of human and artificial intelligence. Currently there is no clear framework or methodology on how to create such an effective mixture.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.14961v1-abstract-full').style.display = 'inline'; document.getElementById('2104.14961v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2104.14961v1-abstract-full" style="display: none;"> Artificial Intelligence (AI) can augment and sometimes even replace human cognition. Inspired by efforts to value human agency alongside productivity, we discuss the benefits of solving Citizen Science (CS) tasks with Hybrid Intelligence (HI), a synergetic mixture of human and artificial intelligence. Currently there is no clear framework or methodology on how to create such an effective mixture. Due to the unique participant-centered set of values and the abundance of tasks drawing upon both human common sense and complex 21st century skills, we believe that the field of CS offers an invaluable testbed for the development of HI and human-centered AI of the 21st century, while benefiting CS as well. In order to investigate this potential, we first relate CS to adjacent computational disciplines. Then, we demonstrate that CS projects can be grouped according to their potential for HI-enhancement by examining two key dimensions: the level of digitization and the amount of knowledge or experience required for participation. Finally, we propose a framework for types of human-AI interaction in CS based on established criteria of HI. This &#34;HI lens&#34; provides the CS community with an overview of several ways to utilize the combination of AI and human intelligence in their projects. It also allows the AI community to gain ideas on how developing AI in CS projects can further their own field. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.14961v1-abstract-full').style.display = 'none'; document.getElementById('2104.14961v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.08414">arXiv:2102.08414</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.08414">pdf</a>, <a href="https://arxiv.org/format/2102.08414">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/mnras/stab2093">10.1093/mnras/stab2093 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Galaxy Zoo DECaLS: Detailed Visual Morphology Measurements from Volunteers and Deep Learning for 314,000 Galaxies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Lintott%2C+C">Chris Lintott</a>, <a href="/search/cs?searchtype=author&amp;query=Geron%2C+T">Tobias Geron</a>, <a href="/search/cs?searchtype=author&amp;query=Kruk%2C+S">Sandor Kruk</a>, <a href="/search/cs?searchtype=author&amp;query=Krawczyk%2C+C">Coleman Krawczyk</a>, <a href="/search/cs?searchtype=author&amp;query=Willett%2C+K+W">Kyle W. Willett</a>, <a href="/search/cs?searchtype=author&amp;query=Bamford%2C+S">Steven Bamford</a>, <a href="/search/cs?searchtype=author&amp;query=Kelvin%2C+L+S">Lee S. Kelvin</a>, <a href="/search/cs?searchtype=author&amp;query=Fortson%2C+L">Lucy Fortson</a>, <a href="/search/cs?searchtype=author&amp;query=Gal%2C+Y">Yarin Gal</a>, <a href="/search/cs?searchtype=author&amp;query=Keel%2C+W">William Keel</a>, <a href="/search/cs?searchtype=author&amp;query=Masters%2C+K+L">Karen L. Masters</a>, <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+V">Vihang Mehta</a>, <a href="/search/cs?searchtype=author&amp;query=Simmons%2C+B+D">Brooke D. Simmons</a>, <a href="/search/cs?searchtype=author&amp;query=Smethurst%2C+R">Rebecca Smethurst</a>, <a href="/search/cs?searchtype=author&amp;query=Smith%2C+L">Lewis Smith</a>, <a href="/search/cs?searchtype=author&amp;query=Baeten%2C+E+M">Elisabeth M. Baeten</a>, <a href="/search/cs?searchtype=author&amp;query=Macmillan%2C+C">Christine Macmillan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.08414v2-abstract-short" style="display: inline;"> We present Galaxy Zoo DECaLS: detailed visual morphological classifications for Dark Energy Camera Legacy Survey images of galaxies within the SDSS DR8 footprint. Deeper DECaLS images (r=23.6 vs. r=22.2 from SDSS) reveal spiral arms, weak bars, and tidal features not previously visible in SDSS imaging. To best exploit the greater depth of DECaLS images, volunteers select from a new set of answers&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.08414v2-abstract-full').style.display = 'inline'; document.getElementById('2102.08414v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.08414v2-abstract-full" style="display: none;"> We present Galaxy Zoo DECaLS: detailed visual morphological classifications for Dark Energy Camera Legacy Survey images of galaxies within the SDSS DR8 footprint. Deeper DECaLS images (r=23.6 vs. r=22.2 from SDSS) reveal spiral arms, weak bars, and tidal features not previously visible in SDSS imaging. To best exploit the greater depth of DECaLS images, volunteers select from a new set of answers designed to improve our sensitivity to mergers and bars. Galaxy Zoo volunteers provide 7.5 million individual classifications over 314,000 galaxies. 140,000 galaxies receive at least 30 classifications, sufficient to accurately measure detailed morphology like bars, and the remainder receive approximately 5. All classifications are used to train an ensemble of Bayesian convolutional neural networks (a state-of-the-art deep learning method) to predict posteriors for the detailed morphology of all 314,000 galaxies. When measured against confident volunteer classifications, the networks are approximately 99% accurate on every question. Morphology is a fundamental feature of every galaxy; our human and machine classifications are an accurate and detailed resource for understanding how galaxies evolve. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.08414v2-abstract-full').style.display = 'none'; document.getElementById('2102.08414v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by MNRAS July &#39;21. Open access DOI below. Data at https://doi.org/10.5281/zenodo.4196266. Code at https://www.github.com/mwalmsley/zoobot. Docs at https://zoobot.readthedocs.io/. Interactive viewer at https://share.streamlit.io/mwalmsley/galaxy-poster/gz_decals_mike_walmsley.py</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1905.07424">arXiv:1905.07424</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1905.07424">pdf</a>, <a href="https://arxiv.org/format/1905.07424">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/mnras/stz2816">10.1093/mnras/stz2816 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Galaxy Zoo: Probabilistic Morphology through Bayesian CNNs and Active Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Walmsley%2C+M">Mike Walmsley</a>, <a href="/search/cs?searchtype=author&amp;query=Smith%2C+L">Lewis Smith</a>, <a href="/search/cs?searchtype=author&amp;query=Lintott%2C+C">Chris Lintott</a>, <a href="/search/cs?searchtype=author&amp;query=Gal%2C+Y">Yarin Gal</a>, <a href="/search/cs?searchtype=author&amp;query=Bamford%2C+S">Steven Bamford</a>, <a href="/search/cs?searchtype=author&amp;query=Dickinson%2C+H">Hugh Dickinson</a>, <a href="/search/cs?searchtype=author&amp;query=Fortson%2C+L">Lucy Fortson</a>, <a href="/search/cs?searchtype=author&amp;query=Kruk%2C+S">Sandor Kruk</a>, <a href="/search/cs?searchtype=author&amp;query=Masters%2C+K">Karen Masters</a>, <a href="/search/cs?searchtype=author&amp;query=Scarlata%2C+C">Claudia Scarlata</a>, <a href="/search/cs?searchtype=author&amp;query=Simmons%2C+B">Brooke Simmons</a>, <a href="/search/cs?searchtype=author&amp;query=Smethurst%2C+R">Rebecca Smethurst</a>, <a href="/search/cs?searchtype=author&amp;query=Wright%2C+D">Darryl Wright</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1905.07424v2-abstract-short" style="display: inline;"> We use Bayesian convolutional neural networks and a novel generative model of Galaxy Zoo volunteer responses to infer posteriors for the visual morphology of galaxies. Bayesian CNN can learn from galaxy images with uncertain labels and then, for previously unlabelled galaxies, predict the probability of each possible label. Our posteriors are well-calibrated (e.g. for predicting bars, we achieve c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1905.07424v2-abstract-full').style.display = 'inline'; document.getElementById('1905.07424v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1905.07424v2-abstract-full" style="display: none;"> We use Bayesian convolutional neural networks and a novel generative model of Galaxy Zoo volunteer responses to infer posteriors for the visual morphology of galaxies. Bayesian CNN can learn from galaxy images with uncertain labels and then, for previously unlabelled galaxies, predict the probability of each possible label. Our posteriors are well-calibrated (e.g. for predicting bars, we achieve coverage errors of 11.8% within a vote fraction deviation of 0.2) and hence are reliable for practical use. Further, using our posteriors, we apply the active learning strategy BALD to request volunteer responses for the subset of galaxies which, if labelled, would be most informative for training our network. We show that training our Bayesian CNNs using active learning requires up to 35-60% fewer labelled galaxies, depending on the morphological feature being classified. By combining human and machine intelligence, Galaxy Zoo will be able to classify surveys of any conceivable scale on a timescale of weeks, providing massive and detailed morphology catalogues to support research into galaxy evolution. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1905.07424v2-abstract-full').style.display = 'none'; document.getElementById('1905.07424v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 May, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by MNRAS. 21 pages, including appendices</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10