<!--
CINXE.COM

Search | arXiv e-print repository
-->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 63 results for author: <span class="mathjax">Krishnaswamy, S</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Krishnaswamy%2C+S">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Krishnaswamy, S"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Krishnaswamy%2C+S&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Krishnaswamy, S"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Krishnaswamy%2C+S&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Krishnaswamy%2C+S&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Krishnaswamy%2C+S&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12626">arXiv:2411.12626</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12626">pdf</a>, <a href="https://arxiv.org/format/2411.12626">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Exploring the Manifold of Neural Networks Using Diffusion Geometry </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Abel%2C+E">Elliott Abel</a>, <a 
href="/search/cs?searchtype=author&amp;query=Crevasse%2C+P">Peyton Crevasse</a>, <a href="/search/cs?searchtype=author&amp;query=Grinspan%2C+Y">Yvan Grinspan</a>, <a href="/search/cs?searchtype=author&amp;query=Mazioud%2C+S">Selma Mazioud</a>, <a href="/search/cs?searchtype=author&amp;query=Ogundipe%2C+F">Folu Ogundipe</a>, <a href="/search/cs?searchtype=author&amp;query=Reimann%2C+K">Kristof Reimann</a>, <a href="/search/cs?searchtype=author&amp;query=Schueler%2C+E">Ellie Schueler</a>, <a href="/search/cs?searchtype=author&amp;query=Steindl%2C+A+J">Andrew J. Steindl</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+E">Ellen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Viswanath%2C+S">Siddharth Viswanath</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yanlei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Rudner%2C+T+G+J">Tim G. J. Rudner</a>, <a href="/search/cs?searchtype=author&amp;query=Adelstein%2C+I">Ian Adelstein</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12626v1-abstract-short" style="display: inline;"> Drawing motivation from the manifold hypothesis, which posits that most high-dimensional data lies on or near low-dimensional manifolds, we apply manifold learning to the space of neural networks. We learn manifolds where datapoints are neural networks by introducing a distance between the hidden layer representations of the neural networks. 
These distances are then fed to the non-linear dimension&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12626v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12626v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12626v1-abstract-full" style="display: none;"> Drawing motivation from the manifold hypothesis, which posits that most high-dimensional data lies on or near low-dimensional manifolds, we apply manifold learning to the space of neural networks. We learn manifolds where datapoints are neural networks by introducing a distance between the hidden layer representations of the neural networks. These distances are then fed to the non-linear dimensionality reduction algorithm PHATE to create a manifold of neural networks. We characterize this manifold using features of the representation, including class separation, hierarchical cluster structure, spectral entropy, and topological structure. Our analysis reveals that high-performing networks cluster together in the manifold, displaying consistent embedding patterns across all these features. Finally, we demonstrate the utility of this approach for guiding hyperparameter optimization and neural architecture search by sampling from the manifold. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12626v1-abstract-full').style.display = 'none'; document.getElementById('2411.12626v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20317">arXiv:2410.20317</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.20317">pdf</a>, <a href="https://arxiv.org/format/2410.20317">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Chemical Physics">physics.chem-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> ProtSCAPE: Mapping the landscape of protein conformations in molecular dynamics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Viswanath%2C+S">Siddharth Viswanath</a>, <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Johnson%2C+D+R">David R. Johnson</a>, <a href="/search/cs?searchtype=author&amp;query=Rocha%2C+J+F">Joao Felipe Rocha</a>, <a href="/search/cs?searchtype=author&amp;query=Castro%2C+E">Egbert Castro</a>, <a href="/search/cs?searchtype=author&amp;query=Grady%2C+J+D">Jackson D. Grady</a>, <a href="/search/cs?searchtype=author&amp;query=Grigas%2C+A+T">Alex T. Grigas</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M+A">Michael A. Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=O%27Hern%2C+C+S">Corey S. 
O&#39;Hern</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20317v1-abstract-short" style="display: inline;"> Understanding the dynamic nature of protein structures is essential for comprehending their biological functions. While significant progress has been made in predicting static folded structures, modeling protein motions on microsecond to millisecond scales remains challenging. To address these challenges, we introduce a novel deep learning architecture, Protein Transformer with Scattering, Attenti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20317v1-abstract-full').style.display = 'inline'; document.getElementById('2410.20317v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20317v1-abstract-full" style="display: none;"> Understanding the dynamic nature of protein structures is essential for comprehending their biological functions. While significant progress has been made in predicting static folded structures, modeling protein motions on microsecond to millisecond scales remains challenging. To address these challenges, we introduce a novel deep learning architecture, Protein Transformer with Scattering, Attention, and Positional Embedding (ProtSCAPE), which leverages the geometric scattering transform alongside transformer-based attention mechanisms to capture protein dynamics from molecular dynamics (MD) simulations. 
ProtSCAPE utilizes the multi-scale nature of the geometric scattering transform to extract features from protein structures conceptualized as graphs and integrates these features with dual attention structures that focus on residues and amino acid signals, generating latent representations of protein trajectories. Furthermore, ProtSCAPE incorporates a regression head to enforce temporally coherent latent representations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20317v1-abstract-full').style.display = 'none'; document.getElementById('2410.20317v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted as a short paper at the 5th Molecular Machine Learning Conference (MoML 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.14639">arXiv:2410.14639</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.14639">pdf</a>, <a href="https://arxiv.org/format/2410.14639">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Convergence of Manifold Filter-Combine Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Johnson%2C+D+R">David R. Johnson</a>, <a href="/search/cs?searchtype=author&amp;query=Chew%2C+J">Joyce Chew</a>, <a href="/search/cs?searchtype=author&amp;query=Viswanath%2C+S">Siddharth Viswanath</a>, <a href="/search/cs?searchtype=author&amp;query=De+Brouwer%2C+E">Edward De Brouwer</a>, <a href="/search/cs?searchtype=author&amp;query=Needell%2C+D">Deanna Needell</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.14639v1-abstract-short" style="display: inline;"> In order to better understand manifold neural networks (MNNs), we introduce Manifold Filter-Combine Networks (MFCNs). The filter-combine framework parallels the popular aggregate-combine paradigm for graph neural networks (GNNs) and naturally suggests many interesting families of MNNs which can be interpreted as the manifold analog of various popular GNNs. We then propose a method for implementing&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14639v1-abstract-full').style.display = 'inline'; document.getElementById('2410.14639v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.14639v1-abstract-full" style="display: none;"> In order to better understand manifold neural networks (MNNs), we introduce Manifold Filter-Combine Networks (MFCNs). The filter-combine framework parallels the popular aggregate-combine paradigm for graph neural networks (GNNs) and naturally suggests many interesting families of MNNs which can be interpreted as the manifold analog of various popular GNNs. 
We then propose a method for implementing MFCNs on high-dimensional point clouds that relies on approximating the manifold by a sparse graph. We prove that our method is consistent in the sense that it converges to a continuum limit as the number of data points tends to infinity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14639v1-abstract-full').style.display = 'none'; document.getElementById('2410.14639v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to NeurIPS Workshop on Symmetry and Geometry in Neural Representations (Extended Abstract Track)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.12779">arXiv:2410.12779</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.12779">pdf</a>, <a href="https://arxiv.org/format/2410.12779">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Differential Geometry">math.DG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Geometry-Aware Generative Autoencoders for Warped Riemannian Metric Learning and Generative Modeling on Data Manifolds </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xingzhi Sun</a>, <a 
href="/search/cs?searchtype=author&amp;query=Liao%2C+D">Danqi Liao</a>, <a href="/search/cs?searchtype=author&amp;query=MacDonald%2C+K">Kincaid MacDonald</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yanlei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Chen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Adelstein%2C+I">Ian Adelstein</a>, <a href="/search/cs?searchtype=author&amp;query=Rudner%2C+T+G+J">Tim G. J. Rudner</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.12779v2-abstract-short" style="display: inline;"> Rapid growth of high-dimensional datasets in fields such as single-cell RNA sequencing and spatial genomics has led to unprecedented opportunities for scientific discovery, but it also presents unique computational and statistical challenges. Traditional methods struggle with geometry-aware data generation, interpolation along meaningful trajectories, and transporting populations via feasible path&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12779v2-abstract-full').style.display = 'inline'; document.getElementById('2410.12779v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.12779v2-abstract-full" style="display: none;"> Rapid growth of high-dimensional datasets in fields such as single-cell RNA sequencing and spatial genomics has led to unprecedented opportunities for scientific discovery, but it also presents unique computational and statistical challenges. 
Traditional methods struggle with geometry-aware data generation, interpolation along meaningful trajectories, and transporting populations via feasible paths. To address these issues, we introduce Geometry-Aware Generative Autoencoder (GAGA), a novel framework that combines extensible manifold learning with generative modeling. GAGA constructs a neural network embedding space that respects the intrinsic geometries discovered by manifold learning and learns a novel warped Riemannian metric on the data space. This warped metric is derived from both the points on the data manifold and negative samples off the manifold, allowing it to characterize a meaningful geometry across the entire latent space. Using this metric, GAGA can uniformly sample points on the manifold, generate points along geodesics, and interpolate between populations across the learned manifold using geodesic-guided flows. GAGA shows competitive performance in simulated and real-world datasets, including a 30% improvement over the state-of-the-art methods in single-cell population-level trajectory inference. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12779v2-abstract-full').style.display = 'none'; document.getElementById('2410.12779v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.03058">arXiv:2410.03058</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.03058">pdf</a>, <a href="https://arxiv.org/format/2410.03058">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DiffKillR: Killing and Recreating Diffeomorphisms for Cell Annotation in Dense Microscopy Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Chen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Liao%2C+D">Danqi Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Parada-Mayorga%2C+A">Alejandro Parada-Mayorga</a>, <a href="/search/cs?searchtype=author&amp;query=Ribeiro%2C+A">Alejandro Ribeiro</a>, <a href="/search/cs?searchtype=author&amp;query=DiStasio%2C+M">Marcello DiStasio</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.03058v1-abstract-short" style="display: inline;"> The proliferation of digital microscopy images, driven by advances in automated whole slide scanning, presents significant opportunities for biomedical research and clinical diagnostics. However, accurately annotating densely packed information in these images remains a major challenge. 
To address this, we introduce DiffKillR, a novel framework that reframes cell annotation as the combination of a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03058v1-abstract-full').style.display = 'inline'; document.getElementById('2410.03058v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.03058v1-abstract-full" style="display: none;"> The proliferation of digital microscopy images, driven by advances in automated whole slide scanning, presents significant opportunities for biomedical research and clinical diagnostics. However, accurately annotating densely packed information in these images remains a major challenge. To address this, we introduce DiffKillR, a novel framework that reframes cell annotation as the combination of archetype matching and image registration tasks. DiffKillR employs two complementary neural networks: one that learns a diffeomorphism-invariant feature space for robust cell matching and another that computes the precise warping field between cells for annotation mapping. Using a small set of annotated archetypes, DiffKillR efficiently propagates annotations across large microscopy images, reducing the need for extensive manual labeling. More importantly, it is suitable for any type of pixel-level annotation. We will discuss the theoretical properties of DiffKillR and validate it on three microscopy tasks, demonstrating its advantages over existing supervised, semi-supervised, and unsupervised methods. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03058v1-abstract-full').style.display = 'none'; document.getElementById('2410.03058v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.00047">arXiv:2410.00047</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.00047">pdf</a>, <a href="https://arxiv.org/format/2410.00047">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> </div> </div> <p class="title is-5 mathjax"> Looking through the mind&#39;s eye via multimodal encoder-decoder networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Afrasiyabi%2C+A">Arman Afrasiyabi</a>, <a href="/search/cs?searchtype=author&amp;query=Busch%2C+E">Erica Busch</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+R">Rahul Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Caplette%2C+L">Laurent Caplette</a>, <a href="/search/cs?searchtype=author&amp;query=Turk-Browne%2C+N">Nicholas Turk-Browne</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.00047v1-abstract-short" style="display: inline;"> In this work, we explore the decoding of mental imagery from subjects using their fMRI measurements. In order to achieve this decoding, we first created a mapping between a subject&#39;s fMRI signals elicited by the videos the subjects watched. This mapping associates the high dimensional fMRI activation states with visual imagery. Next, we prompted the subjects textually, primarily with emotion label&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00047v1-abstract-full').style.display = 'inline'; document.getElementById('2410.00047v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.00047v1-abstract-full" style="display: none;"> In this work, we explore the decoding of mental imagery from subjects using their fMRI measurements. In order to achieve this decoding, we first created a mapping between a subject&#39;s fMRI signals elicited by the videos the subjects watched. This mapping associates the high dimensional fMRI activation states with visual imagery. Next, we prompted the subjects textually, primarily with emotion labels which had no direct reference to visual objects. Then to decode visual imagery that may have been in a person&#39;s mind&#39;s eye, we align a latent representation of these fMRI measurements with a corresponding video-fMRI based on textual labels given to the videos themselves. This alignment has the effect of overlapping the video fMRI embedding with the text-prompted fMRI embedding, thus allowing us to use our fMRI-to-video mapping to decode. Additionally, we enhance an existing fMRI dataset, initially consisting of data from five subjects, by including recordings from three more subjects gathered by our team. 
We demonstrate the efficacy of our model on this augmented dataset both in accurately creating a mapping, as well as in plausibly decoding mental imagery. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00047v1-abstract-full').style.display = 'none'; document.getElementById('2410.00047v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.18462">arXiv:2409.18462</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.18462">pdf</a>, <a href="https://arxiv.org/format/2409.18462">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> </div> </div> <p class="title is-5 mathjax"> Latent Representation Learning for Multimodal Brain Activity Translation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Afrasiyabi%2C+A">Arman Afrasiyabi</a>, <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Busch%2C+E+L">Erica L. Busch</a>, <a href="/search/cs?searchtype=author&amp;query=Caplette%2C+L">Laurent Caplette</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+R">Rahul Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Lajoie%2C+G">Guillaume Lajoie</a>, <a href="/search/cs?searchtype=author&amp;query=Turk-Browne%2C+N+B">Nicholas B. 
Turk-Browne</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.18462v1-abstract-short" style="display: inline;"> Neuroscience employs diverse neuroimaging techniques, each offering distinct insights into brain activity, from electrophysiological recordings such as EEG, which have high temporal resolution, to hemodynamic modalities such as fMRI, which have increased spatial precision. However, integrating these heterogeneous data sources remains a challenge, which limits a comprehensive understanding of brain&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.18462v1-abstract-full').style.display = 'inline'; document.getElementById('2409.18462v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.18462v1-abstract-full" style="display: none;"> Neuroscience employs diverse neuroimaging techniques, each offering distinct insights into brain activity, from electrophysiological recordings such as EEG, which have high temporal resolution, to hemodynamic modalities such as fMRI, which have increased spatial precision. However, integrating these heterogeneous data sources remains a challenge, which limits a comprehensive understanding of brain function. We present the Spatiotemporal Alignment of Multimodal Brain Activity (SAMBA) framework, which bridges the spatial and temporal resolution gaps across modalities by learning a unified latent space free of modality-specific biases. 
SAMBA introduces a novel attention-based wavelet decomposition for spectral filtering of electrophysiological recordings, graph attention networks to model functional connectivity between functional brain units, and recurrent layers to capture temporal autocorrelations in brain signal. We show that the training of SAMBA, aside from achieving translation, also learns a rich representation of brain information processing. We showcase this by classifying external stimuli driving brain activity from the representation learned in hidden layers of SAMBA, paving the way for broad downstream applications in neuroscience research and clinical contexts. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.18462v1-abstract-full').style.display = 'none'; document.getElementById('2409.18462v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.09469">arXiv:2409.09469</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.09469">pdf</a>, <a href="https://arxiv.org/format/2409.09469">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> Hyperedge Representations with Hypergraph Wavelets: Applications to Spatial Transcriptomics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xingzhi Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+C">Charles Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Rocha%2C+J+F">João F. 
Rocha</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Chen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Hollander-Bodie%2C+B">Benjamin Hollander-Bodie</a>, <a href="/search/cs?searchtype=author&amp;query=Goldman%2C+L">Laney Goldman</a>, <a href="/search/cs?searchtype=author&amp;query=DiStasio%2C+M">Marcello DiStasio</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.09469v1-abstract-short" style="display: inline;"> In many data-driven applications, higher-order relationships among multiple objects are essential in capturing complex interactions. Hypergraphs, which generalize graphs by allowing edges to connect any number of nodes, provide a flexible and powerful framework for modeling such higher-order relationships. In this work, we introduce hypergraph diffusion wavelets and describe their favorable spectr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09469v1-abstract-full').style.display = 'inline'; document.getElementById('2409.09469v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.09469v1-abstract-full" style="display: none;"> In many data-driven applications, higher-order relationships among multiple objects are essential in capturing complex interactions. Hypergraphs, which generalize graphs by allowing edges to connect any number of nodes, provide a flexible and powerful framework for modeling such higher-order relationships. In this work, we introduce hypergraph diffusion wavelets and describe their favorable spectral and spatial properties. 
We demonstrate their utility for biomedical discovery in spatially resolved transcriptomics by applying the method to represent disease-relevant cellular niches for Alzheimer&#39;s disease. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09469v1-abstract-full').style.display = 'none'; document.getElementById('2409.09469v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.14794">arXiv:2406.14794</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.14794">pdf</a>, <a href="https://arxiv.org/format/2406.14794">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> ImageFlowNet: Forecasting Multiscale Image-Level Trajectories of Disease Progression with Irregularly-Sampled Longitudinal Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Chen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+K">Ke Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+L+L">Liangbo L. 
Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zilong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Bzdok%2C+D">Danilo Bzdok</a>, <a href="/search/cs?searchtype=author&amp;query=Stewart%2C+J">Jay Stewart</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J+C">Jay C. Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Del+Priore%2C+L+V">Lucian V. Del Priore</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.14794v4-abstract-short" style="display: inline;"> Advances in medical imaging technologies have enabled the collection of longitudinal images, which involve repeated scanning of the same patients over time, to monitor disease progression. However, predictive modeling of such data remains challenging due to high dimensionality, irregular sampling, and data sparsity. To address these issues, we propose ImageFlowNet, a novel model designed to foreca&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.14794v4-abstract-full').style.display = 'inline'; document.getElementById('2406.14794v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.14794v4-abstract-full" style="display: none;"> Advances in medical imaging technologies have enabled the collection of longitudinal images, which involve repeated scanning of the same patients over time, to monitor disease progression. However, predictive modeling of such data remains challenging due to high dimensionality, irregular sampling, and data sparsity. 
To address these issues, we propose ImageFlowNet, a novel model designed to forecast disease trajectories from initial images while preserving spatial details. ImageFlowNet first learns multiscale joint representation spaces across patients and time points, then optimizes deterministic or stochastic flow fields within these spaces using a position-parameterized neural ODE/SDE framework. The model leverages a UNet architecture to create robust multiscale representations and mitigates data scarcity by combining knowledge from all patients. We provide theoretical insights that support our formulation of ODEs, and motivate our regularizations involving high-level visual features, latent space organization, and trajectory smoothness. We validate ImageFlowNet on three longitudinal medical image datasets depicting progression in geographic atrophy, multiple sclerosis, and glioblastoma, demonstrating its ability to effectively forecast disease progression and outperform existing methods. Our contributions include the development of ImageFlowNet, its theoretical underpinnings, and empirical validation on real-world datasets. The official implementation is available at https://github.com/KrishnaswamyLab/ImageFlowNet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.14794v4-abstract-full').style.display = 'none'; document.getElementById('2406.14794v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Updated narration and moved ablation to main text</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.00235">arXiv:2404.00235</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.00235">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.5772/intechopen.100776">10.5772/intechopen.100776 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Information Security and Privacy in the Digital World: Some Selected Topics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sen%2C+J">Jaydip Sen</a>, <a href="/search/cs?searchtype=author&amp;query=Mayer%2C+J">Joceli Mayer</a>, <a href="/search/cs?searchtype=author&amp;query=Dasgupta%2C+S">Subhasis Dasgupta</a>, <a href="/search/cs?searchtype=author&amp;query=Nandi%2C+S">Subrata Nandi</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Srinivasan Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Mitra%2C+P">Pinaki Mitra</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+M+P">Mahendra Pratap Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Kundeti%2C+N+P">Naga Prasanthi Kundeti</a>, <a href="/search/cs?searchtype=author&amp;query=MVP%2C+C+S+R">Chandra Sekhara Rao MVP</a>, 
<a href="/search/cs?searchtype=author&amp;query=Chekuri%2C+S+S">Sudha Sree Chekuri</a>, <a href="/search/cs?searchtype=author&amp;query=Pallapothu%2C+S+B">Seshu Babu Pallapothu</a>, <a href="/search/cs?searchtype=author&amp;query=Nanjundan%2C+P">Preethi Nanjundan</a>, <a href="/search/cs?searchtype=author&amp;query=George%2C+J+P">Jossy P. George</a>, <a href="/search/cs?searchtype=author&amp;query=Allahi%2C+A+E">Abdelhadi El Allahi</a>, <a href="/search/cs?searchtype=author&amp;query=Morino%2C+I">Ilham Morino</a>, <a href="/search/cs?searchtype=author&amp;query=Oussous%2C+S+A">Salma AIT Oussous</a>, <a href="/search/cs?searchtype=author&amp;query=Beloualid%2C+S">Siham Beloualid</a>, <a href="/search/cs?searchtype=author&amp;query=Tamtaoui%2C+A">Ahmed Tamtaoui</a>, <a href="/search/cs?searchtype=author&amp;query=Bajit%2C+A">Abderrahim Bajit</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.00235v1-abstract-short" style="display: inline;"> In the era of generative artificial intelligence and the Internet of Things, while there is explosive growth in the volume of data and the associated need for processing, analysis, and storage, several new challenges are faced in identifying spurious and fake information and protecting the privacy of sensitive data. 
This has led to an increasing demand for more robust and resilient schemes for aut&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.00235v1-abstract-full').style.display = 'inline'; document.getElementById('2404.00235v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.00235v1-abstract-full" style="display: none;"> In the era of generative artificial intelligence and the Internet of Things, while there is explosive growth in the volume of data and the associated need for processing, analysis, and storage, several new challenges are faced in identifying spurious and fake information and protecting the privacy of sensitive data. This has led to an increasing demand for more robust and resilient schemes for authentication, integrity protection, encryption, non-repudiation, and privacy-preservation of data. The chapters in this book present some of the state-of-the-art research works in the field of cryptography and security in computing and communications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.00235v1-abstract-full').style.display = 'none'; document.getElementById('2404.00235v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published by IntechOpen, London UK in Nov 2023, the book contains 8 chapters spanning over 131 pages. 
arXiv admin note: text overlap with arXiv:2307.02055, arXiv:2304.00258</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.04823">arXiv:2312.04823</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.04823">pdf</a>, <a href="https://arxiv.org/format/2312.04823">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Assessing Neural Network Representations During Training Using Noise-Resilient Diffusion Spectral Entropy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liao%2C+D">Danqi Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Chen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Christensen%2C+B+W">Benjamin W. 
Christensen</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Nickel%2C+M">Maximilian Nickel</a>, <a href="/search/cs?searchtype=author&amp;query=Adelstein%2C+I">Ian Adelstein</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.04823v1-abstract-short" style="display: inline;"> Entropy and mutual information in neural networks provide rich information on the learning process, but they have proven difficult to compute reliably in high dimensions. Indeed, in noisy and high-dimensional data, traditional estimates in ambient dimensions approach a fixed entropy and are prohibitively hard to compute. To address these issues, we leverage data geometry to access the underlying m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.04823v1-abstract-full').style.display = 'inline'; document.getElementById('2312.04823v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.04823v1-abstract-full" style="display: none;"> Entropy and mutual information in neural networks provide rich information on the learning process, but they have proven difficult to compute reliably in high dimensions. Indeed, in noisy and high-dimensional data, traditional estimates in ambient dimensions approach a fixed entropy and are prohibitively hard to compute. To address these issues, we leverage data geometry to access the underlying manifold and reliably compute these information-theoretic measures. 
Specifically, we define diffusion spectral entropy (DSE) in neural representations of a dataset as well as diffusion spectral mutual information (DSMI) between different variables representing data. First, we show that they form noise-resistant measures of intrinsic dimensionality and relationship strength in high-dimensional simulated data that outperform classic Shannon entropy, nonparametric estimation, and mutual information neural estimation (MINE). We then study the evolution of representations in classification networks with supervised learning, self-supervision, or overfitting. We observe that (1) DSE of neural representations increases during training; (2) DSMI with the class label increases during generalizable learning but stays stagnant during overfitting; (3) DSMI with the input signal shows differing trends: on MNIST it increases, while on CIFAR-10 and STL-10 it decreases. Finally, we show that DSE can be used to guide better network initialization and that DSMI can be used to predict downstream classification accuracy across 962 models on ImageNet. The official implementation is available at https://github.com/ChenLiu-1996/DiffusionSpectralEntropy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.04823v1-abstract-full').style.display = 'none'; document.getElementById('2312.04823v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ICML 2023 Workshop on Topology, Algebra, and Geometry in Machine Learning </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.16378">arXiv:2311.16378</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.16378">pdf</a>, <a href="https://arxiv.org/format/2311.16378">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Bayesian Formulations for Graph Spectral Denoising </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Leone%2C+S">Sam Leone</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xingzhi Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.16378v2-abstract-short" style="display: inline;"> Here we consider the problem of denoising features associated to complex data, modeled as signals on a graph, via a smoothness prior. This is motivated in part by settings such as single-cell RNA where the data is very high-dimensional, but its structure can be captured via an affinity graph. This allows us to utilize ideas from graph signal processing. 
In particular, we present algorithms for the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.16378v2-abstract-full').style.display = 'inline'; document.getElementById('2311.16378v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.16378v2-abstract-full" style="display: none;"> Here we consider the problem of denoising features associated to complex data, modeled as signals on a graph, via a smoothness prior. This is motivated in part by settings such as single-cell RNA where the data is very high-dimensional, but its structure can be captured via an affinity graph. This allows us to utilize ideas from graph signal processing. In particular, we present algorithms for the cases where the signal is perturbed by Gaussian noise, dropout, and uniformly distributed noise. The signals are assumed to follow a prior distribution defined in the frequency domain which favors signals which are smooth across the edges of the graph. By pairing this prior distribution with our three models of noise generation, we propose Maximum A Posteriori (M.A.P.) estimates of the true signal in the presence of noisy data and provide algorithms for computing the M.A.P. Finally, we demonstrate the algorithms&#39; ability to effectively restore signals from white noise on image data and from severe dropout in single-cell RNA sequence data. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.16378v2-abstract-full').style.display = 'none'; document.getElementById('2311.16378v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.13812">arXiv:2311.13812</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.13812">pdf</a>, <a href="https://arxiv.org/format/2311.13812">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Mechanical Characterization and Inverse Design of Stochastic Architected Metamaterials Using Neural Operators </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jin%2C+H">Hanxun Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+E">Enrui Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+B">Boyu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Sridhar Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Karniadakis%2C+G+E">George Em Karniadakis</a>, <a href="/search/cs?searchtype=author&amp;query=Espinosa%2C+H+D">Horacio D. 
Espinosa</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.13812v2-abstract-short" style="display: inline;"> Machine learning (ML) is emerging as a transformative tool for the design of architected materials, offering properties that far surpass those achievable through lab-based trial-and-error methods. However, a major challenge in current inverse design strategies is their reliance on extensive computational and/or experimental datasets, which becomes particularly problematic for designing micro-scale&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.13812v2-abstract-full').style.display = 'inline'; document.getElementById('2311.13812v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.13812v2-abstract-full" style="display: none;"> Machine learning (ML) is emerging as a transformative tool for the design of architected materials, offering properties that far surpass those achievable through lab-based trial-and-error methods. However, a major challenge in current inverse design strategies is their reliance on extensive computational and/or experimental datasets, which becomes particularly problematic for designing micro-scale stochastic architected materials that exhibit nonlinear mechanical behaviors. Here, we introduce a new end-to-end scientific ML framework, leveraging deep neural operators (DeepONet), to directly learn the relationship between the complete microstructure and mechanical response of architected metamaterials from sparse but high-quality in situ experimental data. The approach facilitates the inverse design of structures tailored to specific nonlinear mechanical behaviors. 
Results obtained from spinodal microstructures, printed using two-photon lithography, reveal that the prediction error for mechanical responses is within a range of 5 - 10%. Our work underscores that by employing neural operators with advanced micro-mechanics experimental techniques, the design of complex micro-architected materials with desired properties becomes feasible, even in scenarios constrained by data scarcity. Our work marks a significant advancement in the field of materials-by-design, potentially heralding a new era in the discovery and development of next-generation metamaterials with unparalleled mechanical characteristics derived directly from experimental insights. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.13812v2-abstract-full').style.display = 'none'; document.getElementById('2311.13812v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">29 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.17579">arXiv:2310.17579</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.17579">pdf</a>, <a href="https://arxiv.org/format/2310.17579">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> BLIS-Net: Classifying and Analyzing Signals on Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+C">Charles Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Goldman%2C+L">Laney Goldman</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+V">Valentina Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Hollander-Bodie%2C+B">Benjamin Hollander-Bodie</a>, <a href="/search/cs?searchtype=author&amp;query=Trank-Greene%2C+M">Maedee Trank-Greene</a>, <a href="/search/cs?searchtype=author&amp;query=Adelstein%2C+I">Ian Adelstein</a>, <a href="/search/cs?searchtype=author&amp;query=De+Brouwer%2C+E">Edward De Brouwer</a>, <a href="/search/cs?searchtype=author&amp;query=Ying%2C+R">Rex Ying</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.17579v1-abstract-short" style="display: inline;"> Graph neural networks (GNNs) have 
emerged as a powerful tool for tasks such as node classification and graph classification. However, much less work has been done on signal classification, where the data consists of many functions (referred to as signals) defined on the vertices of a single graph. These tasks require networks designed differently from those designed for traditional GNN tasks. Inde&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.17579v1-abstract-full').style.display = 'inline'; document.getElementById('2310.17579v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.17579v1-abstract-full" style="display: none;"> Graph neural networks (GNNs) have emerged as a powerful tool for tasks such as node classification and graph classification. However, much less work has been done on signal classification, where the data consists of many functions (referred to as signals) defined on the vertices of a single graph. These tasks require networks designed differently from those designed for traditional GNN tasks. Indeed, traditional GNNs rely on localized low-pass filters, and signals of interest may have intricate multi-frequency behavior and exhibit long range interactions. This motivates us to introduce the BLIS-Net (Bi-Lipschitz Scattering Net), a novel GNN that builds on the previously introduced geometric scattering transform. Our network is able to capture both local and global signal structure and is able to capture both low-frequency and high-frequency information. We make several crucial changes to the original geometric scattering architecture which we prove increase the ability of our network to capture information about the input signal and show that BLIS-Net achieves superior performance on both synthetic and real-world data sets based on traffic flow and fMRI data. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.17579v1-abstract-full').style.display = 'none'; document.getElementById('2310.17579v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings of The 27th International Conference on Artificial Intelligence and Statistics, PMLR 238:4537-4545, 2024 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.09924">arXiv:2309.09924</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.09924">pdf</a>, <a href="https://arxiv.org/format/2309.09924">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Learning graph geometry and topology using dynamical systems based message-passing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yanlei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+C">Charles Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xingzhi Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Fasina%2C+O">Oluwadamilola Fasina</a>, <a 
href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Nickel%2C+M">Maximilian Nickel</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.09924v4-abstract-short" style="display: inline;"> In this paper we introduce DYMAG: a message passing paradigm for GNNs built on the expressive power of continuous, multiscale graph-dynamics. Standard discrete-time message passing algorithms implicitly make use of simplistic graph dynamics and aggregation schemes which limit their ability to capture fundamental graph topological properties. By contrast, DYMAG makes use of complex graph dynamics b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.09924v4-abstract-full').style.display = 'inline'; document.getElementById('2309.09924v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.09924v4-abstract-full" style="display: none;"> In this paper we introduce DYMAG: a message passing paradigm for GNNs built on the expressive power of continuous, multiscale graph-dynamics. Standard discrete-time message passing algorithms implicitly make use of simplistic graph dynamics and aggregation schemes which limit their ability to capture fundamental graph topological properties. By contrast, DYMAG makes use of complex graph dynamics based on the heat and wave equation as well as a more complex equation which admits chaotic solutions. 
The continuous nature of the dynamics is leveraged to generate multiscale (dynamic-time snapshot) representations which we prove are linked to various graph topological and spectral properties. We demonstrate experimentally that DYMAG achieves superior performance in recovering the generating parameters of Erdös-Renyi and stochastic block model random graphs and the persistent homology of synthetic graphs and citation network. Since the behavior of proteins and biomolecules is sensitive to graph topology and exhibits important structure at multiple scales, we find that DYMAG outperforms other methods at predicting salient features of various biomolecules. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.09924v4-abstract-full').style.display = 'none'; document.getElementById('2309.09924v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.07813">arXiv:2309.07813</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.07813">pdf</a>, <a href="https://arxiv.org/format/2309.07813">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cell Behavior">q-bio.CB</span> </div> </div> <p class="title is-5 mathjax"> Directed Scattering for Knowledge Graph-based Cellular Signaling Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Venkat%2C+A">Aarthi Venkat</a>, <a href="/search/cs?searchtype=author&amp;query=Chew%2C+J">Joyce Chew</a>, <a href="/search/cs?searchtype=author&amp;query=Rodriguez%2C+F+C">Ferran Cardoso Rodriguez</a>, <a href="/search/cs?searchtype=author&amp;query=Tape%2C+C+J">Christopher J. Tape</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.07813v1-abstract-short" style="display: inline;"> Directed graphs are a natural model for many phenomena, in particular scientific knowledge graphs such as molecular interaction or chemical reaction networks that define cellular signaling relationships. In these situations, source nodes typically have distinct biophysical properties from sinks. 
Due to their ordered and unidirectional relationships, many such networks also have hierarchical and mu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.07813v1-abstract-full').style.display = 'inline'; document.getElementById('2309.07813v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.07813v1-abstract-full" style="display: none;"> Directed graphs are a natural model for many phenomena, in particular scientific knowledge graphs such as molecular interaction or chemical reaction networks that define cellular signaling relationships. In these situations, source nodes typically have distinct biophysical properties from sinks. Due to their ordered and unidirectional relationships, many such networks also have hierarchical and multiscale structure. However, the majority of methods performing node- and edge-level tasks in machine learning do not take these properties into account, and thus have not been leveraged effectively for scientific tasks such as cellular signaling network inference. We propose a new framework called Directed Scattering Autoencoder (DSAE) which uses a directed version of a geometric scattering transform, combined with the non-linear dimensionality reduction properties of an autoencoder and the geometric properties of the hyperbolic space to learn latent hierarchies. We show this method outperforms numerous others on tasks such as embedding directed graphs and learning cellular signaling networks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.07813v1-abstract-full').style.display = 'none'; document.getElementById('2309.07813v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.00176">arXiv:2308.00176</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.00176">pdf</a>, <a href="https://arxiv.org/format/2308.00176">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> A Flow Artist for High-Dimensional Cellular Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=MacDonald%2C+K">Kincaid MacDonald</a>, <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Thampakkul%2C+G">Guy Thampakkul</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+N">Nhi Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Joia Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=Adelstein%2C+I">Ian Adelstein</a>, <a 
href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.00176v1-abstract-short" style="display: inline;"> We consider the problem of embedding point cloud data sampled from an underlying manifold with an associated flow or velocity. Such data arises in many contexts where static snapshots of dynamic entities are measured, including in high-throughput biology such as single-cell transcriptomics. Existing embedding techniques either do not utilize velocity information or embed the coordinates and veloci&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.00176v1-abstract-full').style.display = 'inline'; document.getElementById('2308.00176v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.00176v1-abstract-full" style="display: none;"> We consider the problem of embedding point cloud data sampled from an underlying manifold with an associated flow or velocity. Such data arises in many contexts where static snapshots of dynamic entities are measured, including in high-throughput biology such as single-cell transcriptomics. Existing embedding techniques either do not utilize velocity information or embed the coordinates and velocities independently, i.e., they either impose velocities on top of an existing point embedding or embed points within a prescribed vector field. Here we present FlowArtist, a neural network that embeds points while jointly learning a vector field around the points. The combination allows FlowArtist to better separate and visualize velocity-informed structures. 
Our results, on toy datasets and single-cell RNA velocity data, illustrate the value of utilizing coordinate and velocity information in tandem for embedding and visualizing high-dimensional data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.00176v1-abstract-full').style.display = 'none'; document.getElementById('2308.00176v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication in 2023 IEEE 33rd International Workshop on Machine Learning for Signal Processing (MLSP)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.04056">arXiv:2307.04056</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.04056">pdf</a>, <a href="https://arxiv.org/format/2307.04056">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Numerical Analysis">math.NA</span> </div> </div> <p class="title is-5 mathjax"> Manifold Filter-Combine Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chew%2C+J">Joyce Chew</a>, <a href="/search/cs?searchtype=author&amp;query=De+Brouwer%2C+E">Edward De Brouwer</a>, 
<a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Needell%2C+D">Deanna Needell</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.04056v3-abstract-short" style="display: inline;"> We introduce a class of manifold neural networks (MNNs) that we call Manifold Filter-Combine Networks (MFCNs), that aims to further our understanding of MNNs, analogous to how the aggregate-combine framework helps with the understanding of graph neural networks (GNNs). This class includes a wide variety of subclasses that can be thought of as the manifold analog of various popular GNNs. We then co&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.04056v3-abstract-full').style.display = 'inline'; document.getElementById('2307.04056v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.04056v3-abstract-full" style="display: none;"> We introduce a class of manifold neural networks (MNNs) that we call Manifold Filter-Combine Networks (MFCNs), that aims to further our understanding of MNNs, analogous to how the aggregate-combine framework helps with the understanding of graph neural networks (GNNs). This class includes a wide variety of subclasses that can be thought of as the manifold analog of various popular GNNs. We then consider a method, based on building a data-driven graph, for implementing such networks when one does not have global knowledge of the manifold, but merely has access to finitely many sample points. We provide sufficient conditions for the network to provably converge to its continuum limit as the number of sample points tends to infinity. 
Unlike previous work (which focused on specific graph constructions), our rate of convergence does not directly depend on the number of filters used. Moreover, it exhibits linear dependence on the depth of the network rather than the exponential dependence obtained previously. Additionally, we provide several examples of interesting subclasses of MFCNs and of the rates of convergence that are obtained under specific graph constructions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.04056v3-abstract-full').style.display = 'none'; document.getElementById('2307.04056v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.02182">arXiv:2307.02182</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.02182">pdf</a>, <a href="https://arxiv.org/format/2307.02182">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> A Scheme to resist Fast Correlation Attack for Word Oriented LFSR based Stream Cipher </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nandi%2C+S">Subrata Nandi</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Srinivasan Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Mitra%2C+P">Pinaki Mitra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.02182v1-abstract-short" style="display: inline;"> In LFSR-based stream ciphers, the knowledge of the feedback equation of the LFSR plays a critical role in most attacks. In word-based stream ciphers such as those in the SNOW series, even if the feedback configuration is hidden, knowing the characteristic polynomial of the state transition matrix of the LFSR enables the attacker to create a feedback equation over $GF(2)$. This, in turn, can be use&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.02182v1-abstract-full').style.display = 'inline'; document.getElementById('2307.02182v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.02182v1-abstract-full" style="display: none;"> In LFSR-based stream ciphers, the knowledge of the feedback equation of the LFSR plays a critical role in most attacks. In word-based stream ciphers such as those in the SNOW series, even if the feedback configuration is hidden, knowing the characteristic polynomial of the state transition matrix of the LFSR enables the attacker to create a feedback equation over $GF(2)$. This, in turn, can be used to launch fast correlation attacks. In this work, we propose a method for hiding both the feedback equation of a word-based LFSR and the characteristic polynomial of the state transition matrix. Here, we employ a $z$-primitive $\sigma$-LFSR whose characteristic polynomial is randomly sampled from the distribution of primitive polynomials over $GF(2)$ of the appropriate degree. We propose an algorithm for locating $z$-primitive $\sigma$-LFSR configurations of a given degree. Further, an invertible matrix is generated from the key. This is then employed to generate a public parameter which is used to retrieve the feedback configuration using the key. 
If the key size is $n$-bits, the process of retrieving the feedback equation from the public parameter has an average time complexity $\mathcal{O}(2^{n-1})$. The proposed method has been tested on SNOW 2.0 and SNOW 3G for resistance to fast correlation attacks. We have demonstrated that the security of SNOW 2.0 and SNOW 3G increases from 128 bits to 256 bits. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.02182v1-abstract-full').style.display = 'none'; document.getElementById('2307.02182v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.07803">arXiv:2306.07803</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.07803">pdf</a>, <a href="https://arxiv.org/format/2306.07803">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Inferring dynamic regulatory interaction graphs from time series data with perturbations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Magruder%2C+S">Sumner Magruder</a>, <a href="/search/cs?searchtype=author&amp;query=De+Brouwer%2C+E">Edward De Brouwer</a>, <a href="/search/cs?searchtype=author&amp;query=Venkat%2C+A">Aarthi Venkat</a>, <a href="/search/cs?searchtype=author&amp;query=Wenkel%2C+F">Frederik Wenkel</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a 
href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.07803v1-abstract-short" style="display: inline;"> Complex systems are characterized by intricate interactions between entities that evolve dynamically over time. Accurate inference of these dynamic relationships is crucial for understanding and predicting system behavior. In this paper, we propose Regulatory Temporal Interaction Network Inference (RiTINI) for inferring time-varying interaction graphs in complex systems using a novel combination o&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.07803v1-abstract-full').style.display = 'inline'; document.getElementById('2306.07803v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.07803v1-abstract-full" style="display: none;"> Complex systems are characterized by intricate interactions between entities that evolve dynamically over time. Accurate inference of these dynamic relationships is crucial for understanding and predicting system behavior. In this paper, we propose Regulatory Temporal Interaction Network Inference (RiTINI) for inferring time-varying interaction graphs in complex systems using a novel combination of space-and-time graph attentions and graph neural ordinary differential equations (ODEs). RiTINI leverages time-lapse signals on a graph prior, as well as perturbations of signals at various nodes in order to effectively capture the dynamics of the underlying system. This approach is distinct from traditional causal inference networks, which are limited to inferring acyclic and static graphs. 
In contrast, RiTINI can infer cyclic, directed, and time-varying graphs, providing a more comprehensive and accurate representation of complex systems. The graph attention mechanism in RiTINI allows the model to adaptively focus on the most relevant interactions in time and space, while the graph neural ODEs enable continuous-time modeling of the system&#39;s dynamics. We evaluate RiTINI&#39;s performance on various simulated and real-world datasets, demonstrating its state-of-the-art capability in inferring interaction graphs compared to previous methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.07803v1-abstract-full').style.display = 'none'; document.getElementById('2306.07803v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.06062">arXiv:2306.06062</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.06062">pdf</a>, <a href="https://arxiv.org/format/2306.06062">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Neural FIM for learning Fisher Information Metrics from point cloud data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Fasina%2C+O">Oluwadamilola Fasina</a>, <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yanlei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Nickel%2C+M">Maximilian Nickel</a>, <a href="/search/cs?searchtype=author&amp;query=Adelstein%2C+I">Ian Adelstein</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.06062v2-abstract-short" style="display: inline;"> Although data diffusion embeddings are ubiquitous in unsupervised learning and have proven to be a viable technique for uncovering the underlying intrinsic geometry of data, diffusion embeddings are inherently limited due to their discrete nature. 
To this end, we propose neural FIM, a method for computing the Fisher information metric (FIM) from point cloud data - allowing for a continuous manifol&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.06062v2-abstract-full').style.display = 'inline'; document.getElementById('2306.06062v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.06062v2-abstract-full" style="display: none;"> Although data diffusion embeddings are ubiquitous in unsupervised learning and have proven to be a viable technique for uncovering the underlying intrinsic geometry of data, diffusion embeddings are inherently limited due to their discrete nature. To this end, we propose neural FIM, a method for computing the Fisher information metric (FIM) from point cloud data - allowing for a continuous manifold model for the data. Neural FIM creates an extensible metric space from discrete point cloud data such that information from the metric can inform us of manifold characteristics such as volume and geodesics. We demonstrate Neural FIM&#39;s utility in selecting parameters for the PHATE visualization method as well as its ability to obtain information pertaining to local volume illuminating branching points and cluster centers embeddings of a toy dataset and two single-cell datasets of IPSC reprogramming and PBMCs (immune cells). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.06062v2-abstract-full').style.display = 'none'; document.getElementById('2306.06062v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 11 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.02508">arXiv:2306.02508</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.02508">pdf</a>, <a href="https://arxiv.org/format/2306.02508">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Graph Fourier MMD for Signals on Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Leone%2C+S">Samuel Leone</a>, <a href="/search/cs?searchtype=author&amp;query=Venkat%2C+A">Aarthi Venkat</a>, <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.02508v1-abstract-short" style="display: inline;"> While numerous methods have been proposed for computing distances between probability distributions in Euclidean space, relatively little attention has been given to computing such distances for distributions on graphs. 
However, there has been a marked increase in data that either lies on graph (such as protein interaction networks) or can be modeled as a graph (single cell data), particularly in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.02508v1-abstract-full').style.display = 'inline'; document.getElementById('2306.02508v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.02508v1-abstract-full" style="display: none;"> While numerous methods have been proposed for computing distances between probability distributions in Euclidean space, relatively little attention has been given to computing such distances for distributions on graphs. However, there has been a marked increase in data that either lies on graph (such as protein interaction networks) or can be modeled as a graph (single cell data), particularly in the biomedical sciences. Thus, it becomes important to find ways to compare signals defined on such graphs. Here, we propose Graph Fourier MMD (GFMMD), a novel distance between distributions and signals on graphs. GFMMD is defined via an optimal witness function that is both smooth on the graph and maximizes difference in expectation between the pair of distributions on the graph. We find an analytical solution to this optimization problem as well as an embedding of distributions that results from this method. We also prove several properties of this method including scale invariance and applicability to disconnected graphs. We showcase it on graph benchmark datasets as well on single cell RNA-sequencing data analysis. In the latter, we use the GFMMD-based gene embeddings to find meaningful gene clusters. We also propose a novel type of score for gene selection called &#34;gene localization score&#34; which helps select genes for cellular state space characterization. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.02508v1-abstract-full').style.display = 'none'; document.getElementById('2306.02508v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.19043">arXiv:2305.19043</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.19043">pdf</a>, <a href="https://arxiv.org/format/2305.19043">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Genomics">q-bio.GN</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> A Heat Diffusion Perspective on Geodesic Preserving Dimensionality Reduction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=De+Brouwer%2C+E">Edward De Brouwer</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yanlei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Adelstein%2C+I">Ian Adelstein</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract 
mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.19043v1-abstract-short" style="display: inline;"> Diffusion-based manifold learning methods have proven useful in representation learning and dimensionality reduction of modern high dimensional, high throughput, noisy datasets. Such datasets are especially present in fields like biology and physics. While it is thought that these methods preserve underlying manifold structure of data by learning a proxy for geodesic distances, no specific theoret&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.19043v1-abstract-full').style.display = 'inline'; document.getElementById('2305.19043v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.19043v1-abstract-full" style="display: none;"> Diffusion-based manifold learning methods have proven useful in representation learning and dimensionality reduction of modern high dimensional, high throughput, noisy datasets. Such datasets are especially present in fields like biology and physics. While it is thought that these methods preserve underlying manifold structure of data by learning a proxy for geodesic distances, no specific theoretical links have been established. Here, we establish such a link via results in Riemannian geometry explicitly connecting heat diffusion to manifold distances. In this process, we also formulate a more general heat kernel based manifold embedding method that we call heat geodesic embeddings. This novel perspective makes clearer the choices available in manifold learning and denoising. Results show that our method outperforms existing state of the art in preserving ground truth manifold distances, and preserving cluster structure in toy datasets. 
We also showcase our method on single cell RNA-sequencing datasets with both continuum and cluster structure, where our method enables interpolation of withheld timepoints of data. Finally, we show that parameters of our more general method can be configured to give results similar to PHATE (a state-of-the-art diffusion based manifold learning method) as well as SNE (an attraction/repulsion neighborhood based method that forms the basis of t-SNE). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.19043v1-abstract-full').style.display = 'none'; document.getElementById('2305.19043v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">31 pages, 13 figures, 10 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.00805">arXiv:2211.00805</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.00805">pdf</a>, <a href="https://arxiv.org/format/2211.00805">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> Geodesic Sinkhorn for Fast and Accurate Optimal Transport on Manifolds </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a 
href="/search/cs?searchtype=author&amp;query=Zapatero%2C+M+R">María Ramos Zapatero</a>, <a href="/search/cs?searchtype=author&amp;query=Tape%2C+C+J">Christopher J. Tape</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.00805v2-abstract-short" style="display: inline;"> Efficient computation of optimal transport distance between distributions is of growing importance in data science. Sinkhorn-based methods are currently the state-of-the-art for such computations, but require $O(n^2)$ computations. In addition, Sinkhorn-based methods commonly use an Euclidean ground distance between datapoints. However, with the prevalence of manifold structured scientific data, i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.00805v2-abstract-full').style.display = 'inline'; document.getElementById('2211.00805v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.00805v2-abstract-full" style="display: none;"> Efficient computation of optimal transport distance between distributions is of growing importance in data science. Sinkhorn-based methods are currently the state-of-the-art for such computations, but require $O(n^2)$ computations. In addition, Sinkhorn-based methods commonly use an Euclidean ground distance between datapoints. However, with the prevalence of manifold structured scientific data, it is often desirable to consider geodesic ground distance. Here, we tackle both issues by proposing Geodesic Sinkhorn -- based on diffusing a heat kernel on a manifold graph. 
Notably, Geodesic Sinkhorn requires only $O(n\log n)$ computation, as we approximate the heat kernel with Chebyshev polynomials based on the sparse graph Laplacian. We apply our method to the computation of barycenters of several distributions of high dimensional single cell data from patient samples undergoing chemotherapy. In particular, we define the barycentric distance as the distance between two such barycenters. Using this definition, we identify an optimal transport distance and path associated with the effect of treatment on cellular data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.00805v2-abstract-full').style.display = 'none'; document.getElementById('2211.00805v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">A shorter version without the appendix appeared in the IEEE International Workshop on Machine Learning for Signal Processing (2023)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.11359">arXiv:2209.11359</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.11359">pdf</a>, <a href="https://arxiv.org/format/2209.11359">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> CUTS: A Deep Learning and Topological Framework for Multigranular Unsupervised Medical Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Chen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Amodio%2C+M">Matthew Amodio</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+L+L">Liangbo L. Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+F">Feng Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Avesta%2C+A">Arman Avesta</a>, <a href="/search/cs?searchtype=author&amp;query=Aneja%2C+S">Sanjay Aneja</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J+C">Jay C. Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Del+Priore%2C+L+V">Lucian V. 
Del Priore</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.11359v7-abstract-short" style="display: inline;"> Segmenting medical images is critical to facilitating both patient diagnoses and quantitative research. A major limiting factor is the lack of labeled data, as obtaining expert annotations for each new set of imaging data and task can be labor intensive and inconsistent among annotators. We present CUTS, an unsupervised deep learning framework for medical image segmentation. CUTS operates in two s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.11359v7-abstract-full').style.display = 'inline'; document.getElementById('2209.11359v7-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.11359v7-abstract-full" style="display: none;"> Segmenting medical images is critical to facilitating both patient diagnoses and quantitative research. A major limiting factor is the lack of labeled data, as obtaining expert annotations for each new set of imaging data and task can be labor intensive and inconsistent among annotators. We present CUTS, an unsupervised deep learning framework for medical image segmentation. CUTS operates in two stages. For each image, it produces an embedding map via intra-image contrastive learning and local patch reconstruction. Then, these embeddings are partitioned at dynamic granularity levels that correspond to the data topology. CUTS yields a series of coarse-to-fine-grained segmentations that highlight features at various granularities. We applied CUTS to retinal fundus images and two types of brain MRI images to delineate structures and patterns at different scales. 
When evaluated against predefined anatomical masks, CUTS improved the dice coefficient and Hausdorff distance by at least 10% compared to existing unsupervised methods. Finally, CUTS showed performance on par with Segment Anything Models (SAM, MedSAM, SAM-Med2D) pre-trained on gigantic labeled datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.11359v7-abstract-full').style.display = 'none'; document.getElementById('2209.11359v7-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to the 27th International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.08561">arXiv:2208.08561</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2208.08561">pdf</a>, <a href="https://arxiv.org/format/2208.08561">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Spectral Theory">math.SP</span> </div> </div> <p class="title is-5 mathjax"> Geometric Scattering on Measure Spaces </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Chew%2C+J">Joyce Chew</a>, <a href="/search/cs?searchtype=author&amp;query=Hirn%2C+M">Matthew Hirn</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Needell%2C+D">Deanna Needell</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=Steach%2C+H">Holly Steach</a>, <a href="/search/cs?searchtype=author&amp;query=Viswanath%2C+S">Siddharth Viswanath</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+H">Hau-Tieng Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.08561v2-abstract-short" style="display: inline;"> The scattering transform is a multilayered, wavelet-based transform initially introduced as a model of convolutional neural networks (CNNs) that has played a foundational role in our understanding of these networks&#39; stability and invariance properties. Subsequently, there has been widespread interest in extending the success of CNNs to data sets with non-Euclidean structure, such as graphs and man&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.08561v2-abstract-full').style.display = 'inline'; document.getElementById('2208.08561v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.08561v2-abstract-full" style="display: none;"> The scattering transform is a multilayered, wavelet-based transform initially introduced as a model of convolutional neural networks (CNNs) that has played a foundational role in our understanding of these networks&#39; stability and invariance properties. 
Subsequently, there has been widespread interest in extending the success of CNNs to data sets with non-Euclidean structure, such as graphs and manifolds, leading to the emerging field of geometric deep learning. In order to improve our understanding of the architectures used in this new field, several papers have proposed generalizations of the scattering transform for non-Euclidean data structures such as undirected graphs and compact Riemannian manifolds without boundary. In this paper, we introduce a general, unified model for geometric scattering on measure spaces. Our proposed framework includes previous work on geometric scattering as special cases but also applies to more general settings such as directed graphs, signed graphs, and manifolds with boundary. We propose a new criterion that identifies to which groups a useful representation should be invariant and show that this criterion is sufficient to guarantee that the scattering transform has desirable stability and invariance properties. Additionally, we consider finite measure spaces that are obtained from randomly sampling an unknown manifold. We propose two methods for constructing a data-driven graph on which the associated graph scattering transform approximates the scattering transform on the underlying manifold. Moreover, we use a diffusion-maps based approach to prove quantitative estimates on the rate of convergence of one of these approximations as the number of sample points tends to infinity. Lastly, we showcase the utility of our method on spherical images, directed graphs, and on high-dimensional single-cell data. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.08561v2-abstract-full').style.display = 'none'; document.getElementById('2208.08561v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T07 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.07458">arXiv:2208.07458</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2208.07458">pdf</a>, <a href="https://arxiv.org/format/2208.07458">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Learnable Filters for Geometric Scattering Modules </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Wenkel%2C+F">Frederik Wenkel</a>, <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Macdonald%2C+K">Kincaid Macdonald</a>, <a href="/search/cs?searchtype=author&amp;query=Grady%2C+J">Jackson Grady</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.07458v1-abstract-short" style="display: inline;"> We propose a new graph neural network (GNN) module, based on relaxations of recently proposed geometric scattering transforms, which consist of a cascade of graph wavelet filters. Our learnable geometric scattering (LEGS) module enables adaptive tuning of the wavelets to encourage band-pass features to emerge in learned representations. The incorporation of our LEGS-module in GNNs enables the lear&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.07458v1-abstract-full').style.display = 'inline'; document.getElementById('2208.07458v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.07458v1-abstract-full" style="display: none;"> We propose a new graph neural network (GNN) module, based on relaxations of recently proposed geometric scattering transforms, which consist of a cascade of graph wavelet filters. Our learnable geometric scattering (LEGS) module enables adaptive tuning of the wavelets to encourage band-pass features to emerge in learned representations. The incorporation of our LEGS-module in GNNs enables the learning of longer-range graph relations compared to many popular GNNs, which often rely on encoding graph structure via smoothness or similarity between neighbors. Further, its wavelet priors result in simplified architectures with significantly fewer learned parameters compared to competing GNNs. We demonstrate the predictive performance of LEGS-based networks on graph classification benchmarks, as well as the descriptive quality of their learned features in biochemical graph data exploration tasks. 
Our results show that LEGS-based networks match or outperforms popular GNNs, as well as the original geometric scattering construction, on many datasets, in particular in biochemical domains, while retaining certain mathematical properties of handcrafted (non-learned) geometric scattering. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.07458v1-abstract-full').style.display = 'none'; document.getElementById('2208.07458v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 3 figures, 10 tables. arXiv admin note: substantial text overlap with arXiv:2010.02415</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.14928">arXiv:2206.14928</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.14928">pdf</a>, <a href="https://arxiv.org/format/2206.14928">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Manifold Interpolating Optimal-Transport Flows for Trajectory Inference </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Magruder%2C+D+S">D. S. 
Magruder</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Fasina%2C+O">Oluwadamilola Fasina</a>, <a href="/search/cs?searchtype=author&amp;query=Kuchroo%2C+M">Manik Kuchroo</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.14928v2-abstract-short" style="display: inline;"> We present a method called Manifold Interpolating Optimal-Transport Flow (MIOFlow) that learns stochastic, continuous population dynamics from static snapshot samples taken at sporadic timepoints. MIOFlow combines dynamic models, manifold learning, and optimal transport by training neural ordinary differential equations (Neural ODE) to interpolate between static population snapshots as penalized b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.14928v2-abstract-full').style.display = 'inline'; document.getElementById('2206.14928v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.14928v2-abstract-full" style="display: none;"> We present a method called Manifold Interpolating Optimal-Transport Flow (MIOFlow) that learns stochastic, continuous population dynamics from static snapshot samples taken at sporadic timepoints. MIOFlow combines dynamic models, manifold learning, and optimal transport by training neural ordinary differential equations (Neural ODE) to interpolate between static population snapshots as penalized by optimal transport with manifold ground distance. 
Further, we ensure that the flow follows the geometry by operating in the latent space of an autoencoder that we call a geodesic autoencoder (GAE). In GAE the latent space distance between points is regularized to match a novel multiscale geodesic distance on the data manifold that we define. We show that this method is superior to normalizing flows, Schrödinger bridges and other generative models that are designed to flow from noise to data in terms of interpolating between populations. Theoretically, we link these trajectories with dynamic optimal transport. We evaluate our method on simulated data with bifurcations and merges, as well as scRNA-seq data from embryoid body differentiation, and acute myeloid leukemia treatment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.14928v2-abstract-full').style.display = 'none'; document.getElementById('2206.14928v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented at NeurIPS 2022, 24 pages, 7 tables, 14 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.10078">arXiv:2206.10078</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.10078">pdf</a>, <a href="https://arxiv.org/format/2206.10078">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Numerical Analysis">math.NA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> The Manifold Scattering Transform for High-Dimensional Point Cloud Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chew%2C+J">Joyce Chew</a>, <a href="/search/cs?searchtype=author&amp;query=Steach%2C+H+R">Holly R. 
Steach</a>, <a href="/search/cs?searchtype=author&amp;query=Viswanath%2C+S">Siddharth Viswanath</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+H">Hau-Tieng Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Hirn%2C+M">Matthew Hirn</a>, <a href="/search/cs?searchtype=author&amp;query=Needell%2C+D">Deanna Needell</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M">Michael Perlmutter</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.10078v2-abstract-short" style="display: inline;"> The manifold scattering transform is a deep feature extractor for data defined on a Riemannian manifold. It is one of the first examples of extending convolutional neural network-like operators to general manifolds. The initial work on this model focused primarily on its theoretical stability and invariance properties but did not provide methods for its numerical implementation except in the case&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.10078v2-abstract-full').style.display = 'inline'; document.getElementById('2206.10078v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.10078v2-abstract-full" style="display: none;"> The manifold scattering transform is a deep feature extractor for data defined on a Riemannian manifold. It is one of the first examples of extending convolutional neural network-like operators to general manifolds. The initial work on this model focused primarily on its theoretical stability and invariance properties but did not provide methods for its numerical implementation except in the case of two-dimensional surfaces with predefined meshes. 
In this work, we present practical schemes, based on the theory of diffusion maps, for implementing the manifold scattering transform to datasets arising in naturalistic systems, such as single cell genetics, where the data is a high-dimensional point cloud modeled as lying on a low-dimensional manifold. We show that our methods are effective for signal classification and manifold classification tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.10078v2-abstract-full').style.display = 'none'; document.getElementById('2206.10078v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication in the TAG in DS Workshop at ICML. 
For subsequent theoretical guarantees, please see Section 6 of arXiv:2208.08561</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T07 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.03977">arXiv:2206.03977</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.03977">pdf</a>, <a href="https://arxiv.org/format/2206.03977">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Diffusion Curvature for Estimating Local Curvature in High Dimensional Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=MacDonald%2C+K">Kincaid MacDonald</a>, <a href="/search/cs?searchtype=author&amp;query=Fasina%2C+O">Oluwadamilola Fasina</a>, <a href="/search/cs?searchtype=author&amp;query=Thomas%2C+D">Dawson Thomas</a>, <a href="/search/cs?searchtype=author&amp;query=Rieck%2C+B">Bastian Rieck</a>, <a href="/search/cs?searchtype=author&amp;query=Adelstein%2C+I">Ian Adelstein</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.03977v1-abstract-short" style="display: inline;"> We introduce a new intrinsic measure of local curvature on point-cloud data called diffusion curvature. 
Our measure uses the framework of diffusion maps, including the data diffusion operator, to structure point cloud data and define local curvature based on the laziness of a random walk starting at a point or region of the data. We show that this laziness directly relates to volume comparison res&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.03977v1-abstract-full').style.display = 'inline'; document.getElementById('2206.03977v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.03977v1-abstract-full" style="display: none;"> We introduce a new intrinsic measure of local curvature on point-cloud data called diffusion curvature. Our measure uses the framework of diffusion maps, including the data diffusion operator, to structure point cloud data and define local curvature based on the laziness of a random walk starting at a point or region of the data. We show that this laziness directly relates to volume comparison results from Riemannian geometry. We then extend this scalar curvature notion to an entire quadratic form using neural network estimations based on the diffusion map of point-cloud data. We show applications of both estimations on toy data, single-cell data, and on estimating local Hessian matrices of neural network loss landscapes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.03977v1-abstract-full').style.display = 'none'; document.getElementById('2206.03977v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Thirty-sixth Conference on Neural Information Processing Systems (NeurIPS 2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.14860">arXiv:2203.14860</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.14860">pdf</a>, <a href="https://arxiv.org/format/2203.14860">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Time-inhomogeneous diffusion geometry and topology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Rieck%2C+B">Bastian Rieck</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+J">Jessie Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Kuchroo%2C+M">Manik Kuchroo</a>, <a href="/search/cs?searchtype=author&amp;query=Hirn%2C+M">Matthew Hirn</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.14860v2-abstract-short" style="display: inline;"> Diffusion condensation is a dynamic process that yields a sequence of multiscale data representations that aim to encode meaningful abstractions. 
It has proven effective for manifold learning, denoising, clustering, and visualization of high-dimensional data. Diffusion condensation is constructed as a time-inhomogeneous process where each step first computes and then applies a diffusion operator t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.14860v2-abstract-full').style.display = 'inline'; document.getElementById('2203.14860v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.14860v2-abstract-full" style="display: none;"> Diffusion condensation is a dynamic process that yields a sequence of multiscale data representations that aim to encode meaningful abstractions. It has proven effective for manifold learning, denoising, clustering, and visualization of high-dimensional data. Diffusion condensation is constructed as a time-inhomogeneous process where each step first computes and then applies a diffusion operator to the data. We theoretically analyze the convergence and evolution of this process from geometric, spectral, and topological perspectives. From a geometric perspective, we obtain convergence bounds based on the smallest transition probability and the radius of the data, whereas from a spectral perspective, our bounds are based on the eigenspectrum of the diffusion kernel. Our spectral results are of particular interest since most of the literature on data diffusion is focused on homogeneous processes. From a topological perspective, we show diffusion condensation generalizes centroid-based hierarchical clustering. We use this perspective to obtain a bound based on the number of data points, independent of their location. To understand the evolution of the data geometry beyond convergence, we use topological data analysis. We show that the condensation process itself defines an intrinsic condensation homology. 
We use this intrinsic topology as well as the ambient persistent homology of the condensation process to study how the data changes over diffusion time. We demonstrate both types of topological information in well-understood toy examples. Our work gives theoretical insights into the convergence of diffusion condensation, and shows that it provides a link between topological and geometric data analysis. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.14860v2-abstract-full').style.display = 'none'; document.getElementById('2203.14860v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.09948">arXiv:2201.09948</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.09948">pdf</a>, <a href="https://arxiv.org/format/2201.09948">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> ReLSO: A Transformer-based Model for Latent Space Optimization and Generation of Proteins </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Castro%2C+E">Egbert Castro</a>, <a href="/search/cs?searchtype=author&amp;query=Godavarthi%2C+A">Abhinav Godavarthi</a>, <a href="/search/cs?searchtype=author&amp;query=Rubinfien%2C+J">Julian Rubinfien</a>, <a href="/search/cs?searchtype=author&amp;query=Givechian%2C+K+B">Kevin B. 
Givechian</a>, <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.09948v2-abstract-short" style="display: inline;"> The development of powerful natural language models has increased the ability to learn meaningful representations of protein sequences. In addition, advances in high-throughput mutagenesis, directed evolution, and next-generation sequencing have allowed for the accumulation of large amounts of labeled fitness data. Leveraging these two trends, we introduce Regularized Latent Space Optimization (R&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.09948v2-abstract-full').style.display = 'inline'; document.getElementById('2201.09948v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.09948v2-abstract-full" style="display: none;"> The development of powerful natural language models has increased the ability to learn meaningful representations of protein sequences. In addition, advances in high-throughput mutagenesis, directed evolution, and next-generation sequencing have allowed for the accumulation of large amounts of labeled fitness data. Leveraging these two trends, we introduce Regularized Latent Space Optimization (ReLSO), a deep transformer-based autoencoder which features a highly structured latent space that is trained to jointly generate sequences as well as predict fitness. Through regularized prediction heads, ReLSO introduces a powerful protein sequence encoder and novel approach for efficient fitness landscape traversal. 
Using ReLSO, we explicitly model the sequence-function landscape of large labeled datasets and generate new molecules by optimizing within the latent space using gradient-based methods. We evaluate this approach on several publicly-available protein datasets, including variant sets of anti-ranibizumab and GFP. We observe a greater sequence optimization efficiency (increase in fitness per optimization step) by ReLSO compared to other approaches, where ReLSO more robustly generates high-fitness sequences. Furthermore, the attention-based relationships learned by the jointly-trained ReLSO models provide a potential avenue towards sequence-level fitness attribution information. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.09948v2-abstract-full').style.display = 'none'; document.getElementById('2201.09948v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.00622">arXiv:2201.00622</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.00622">pdf</a>, <a href="https://arxiv.org/format/2201.00622">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Learning shared neural manifolds from multi-subject FMRI data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+J">Jessie Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Busch%2C+E+L">Erica L. Busch</a>, <a href="/search/cs?searchtype=author&amp;query=Wallenstein%2C+T">Tom Wallenstein</a>, <a href="/search/cs?searchtype=author&amp;query=Gerasimiuk%2C+M">Michal Gerasimiuk</a>, <a href="/search/cs?searchtype=author&amp;query=Benz%2C+A">Andrew Benz</a>, <a href="/search/cs?searchtype=author&amp;query=Lajoie%2C+G">Guillaume Lajoie</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Turk-Browne%2C+N+B">Nicholas B. 
Turk-Browne</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.00622v1-abstract-short" style="display: inline;"> Functional magnetic resonance imaging (fMRI) is a notoriously noisy measurement of brain activity because of the large variations between individuals, signals marred by environmental differences during collection, and spatiotemporal averaging required by the measurement resolution. In addition, the data is extremely high dimensional, with the space of the activity typically having much lower intri&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.00622v1-abstract-full').style.display = 'inline'; document.getElementById('2201.00622v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.00622v1-abstract-full" style="display: none;"> Functional magnetic resonance imaging (fMRI) is a notoriously noisy measurement of brain activity because of the large variations between individuals, signals marred by environmental differences during collection, and spatiotemporal averaging required by the measurement resolution. In addition, the data is extremely high dimensional, with the space of the activity typically having much lower intrinsic dimension. In order to understand the connection between stimuli of interest and brain activity, and analyze differences and commonalities between subjects, it becomes important to learn a meaningful embedding of the data that denoises, and reveals its intrinsic structure. Specifically, we assume that while noise varies significantly between individuals, true responses to stimuli will share common, low-dimensional features between subjects which are jointly discoverable. 
Similar approaches have been exploited previously but they have mainly used linear methods such as PCA and shared response modeling (SRM). In contrast, we propose a neural network called MRMD-AE (manifold-regularized multiple decoder, autoencoder), that learns a common embedding from multiple subjects in an experiment while retaining the ability to decode to individual raw fMRI signals. We show that our learned common space represents an extensible manifold (where new points not seen during training can be mapped), improves the classification accuracy of stimulus features of unseen timepoints, as well as improves cross-subject translation of fMRI signals. We believe this framework can be used for many downstream applications such as guided brain-computer interface (BCI) training in the future. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.00622v1-abstract-full').style.display = 'none'; document.getElementById('2201.00622v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.10452">arXiv:2111.10452</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2111.10452">pdf</a>, <a href="https://arxiv.org/format/2111.10452">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> MURAL: An Unsupervised Random Forest-Based Embedding for Electronic Health Record Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gerasimiuk%2C+M">Michal Gerasimiuk</a>, <a href="/search/cs?searchtype=author&amp;query=Shung%2C+D">Dennis Shung</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Stanley%2C+A">Adrian Stanley</a>, <a href="/search/cs?searchtype=author&amp;query=Schultz%2C+M">Michael Schultz</a>, <a href="/search/cs?searchtype=author&amp;query=Ngu%2C+J">Jeffrey Ngu</a>, <a href="/search/cs?searchtype=author&amp;query=Laine%2C+L">Loren Laine</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.10452v1-abstract-short" style="display: inline;"> A major challenge in embedding or visualizing clinical patient data is the heterogeneity of variable types including continuous lab values, categorical diagnostic codes, as well as missing or incomplete data. 
In particular, in EHR data, some variables are {\em missing not at random (MNAR)} but deliberately not collected and thus are a source of information. For example, lab tests may be deemed nec&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.10452v1-abstract-full').style.display = 'inline'; document.getElementById('2111.10452v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.10452v1-abstract-full" style="display: none;"> A major challenge in embedding or visualizing clinical patient data is the heterogeneity of variable types including continuous lab values, categorical diagnostic codes, as well as missing or incomplete data. In particular, in EHR data, some variables are {\em missing not at random (MNAR)} but deliberately not collected and thus are a source of information. For example, lab tests may be deemed necessary for some patients on the basis of suspected diagnosis, but not for others. Here we present the MURAL forest -- an unsupervised random forest for representing data with disparate variable types (e.g., categorical, continuous, MNAR). MURAL forests consist of a set of decision trees where node-splitting variables are chosen at random, such that the marginal entropy of all other variables is minimized by the split. This allows us to also split on MNAR variables and discrete variables in a way that is consistent with the continuous variables. The end goal is to learn the MURAL embedding of patients using average tree distances between those patients. These distances can be fed to a nonlinear dimensionality reduction method like PHATE to derive visualizable embeddings. While such methods are ubiquitous in continuous-valued datasets (like single cell RNA-sequencing) they have not been used extensively in mixed variable data. We showcase the use of our method on one artificial and two clinical datasets. 
We show that using our approach, we can visualize and classify data more accurately than competing approaches. Finally, we show that MURAL can also be used to compare cohorts of patients via the recently proposed tree-sliced Wasserstein distances. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.10452v1-abstract-full').style.display = 'none'; document.getElementById('2111.10452v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.06241">arXiv:2110.06241</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.06241">pdf</a>, <a href="https://arxiv.org/format/2110.06241">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Molecular Graph Generation via Geometric Scattering </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bhaskar%2C+D">Dhananjay Bhaskar</a>, <a href="/search/cs?searchtype=author&amp;query=Grady%2C+J+D">Jackson D. Grady</a>, <a href="/search/cs?searchtype=author&amp;query=Perlmutter%2C+M+A">Michael A. 
Perlmutter</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.06241v1-abstract-short" style="display: inline;"> Graph neural networks (GNNs) have been used extensively for addressing problems in drug design and discovery. Both ligand and target molecules are represented as graphs with node and edge features encoding information about atomic elements and bonds respectively. Although existing deep learning models perform remarkably well at predicting physicochemical properties and binding affinities, the gene&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.06241v1-abstract-full').style.display = 'inline'; document.getElementById('2110.06241v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.06241v1-abstract-full" style="display: none;"> Graph neural networks (GNNs) have been used extensively for addressing problems in drug design and discovery. Both ligand and target molecules are represented as graphs with node and edge features encoding information about atomic elements and bonds respectively. Although existing deep learning models perform remarkably well at predicting physicochemical properties and binding affinities, the generation of new molecules with optimized properties remains challenging. Inherently, most GNNs perform poorly in whole-graph representation due to the limitations of the message-passing paradigm. Furthermore, step-by-step graph generation frameworks that use reinforcement learning or other sequential processing can be slow and result in a high proportion of invalid molecules with substantial post-processing needed in order to satisfy the principles of stoichiometry. 
To address these issues, we propose a representation-first approach to molecular graph generation. We guide the latent representation of an autoencoder by capturing graph structure information with the geometric scattering transform and apply penalties that structure the representation also by molecular properties. We show that this highly structured latent space can be directly used for molecular graph generation by the use of a GAN. We demonstrate that our architecture learns meaningful representations of drug datasets and provides a platform for goal-directed drug synthesis. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.06241v1-abstract-full').style.display = 'none'; document.getElementById('2110.06241v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.12334">arXiv:2107.12334</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.12334">pdf</a>, <a href="https://arxiv.org/format/2107.12334">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Embedding Signals on Knowledge Graphs with Unbalanced Diffusion Earth Mover&#39;s Distance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Shung%2C+D">Dennis Shung</a>, <a href="/search/cs?searchtype=author&amp;query=Natik%2C+A">Amine Natik</a>, <a href="/search/cs?searchtype=author&amp;query=Kuchroo%2C+M">Manik Kuchroo</a>, <a href="/search/cs?searchtype=author&amp;query=Lajoie%2C+G">Guillaume Lajoie</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.12334v2-abstract-short" style="display: inline;"> In modern relational machine learning it is common to encounter large graphs that arise via interactions or similarities between observations in many domains. Further, in many cases the target entities for analysis are actually signals on such graphs. 
We propose to compare and organize such datasets of graph signals by using an earth mover&#39;s distance (EMD) with a geodesic cost over the underlying&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.12334v2-abstract-full').style.display = 'inline'; document.getElementById('2107.12334v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.12334v2-abstract-full" style="display: none;"> In modern relational machine learning it is common to encounter large graphs that arise via interactions or similarities between observations in many domains. Further, in many cases the target entities for analysis are actually signals on such graphs. We propose to compare and organize such datasets of graph signals by using an earth mover&#39;s distance (EMD) with a geodesic cost over the underlying graph. Typically, EMD is computed by optimizing over the cost of transporting one probability distribution to another over an underlying metric space. However, this is inefficient when computing the EMD between many signals. Here, we propose an unbalanced graph EMD that efficiently embeds the unbalanced EMD on an underlying graph into an $L^1$ space, whose metric we call unbalanced diffusion earth mover&#39;s distance (UDEMD). Next, we show how this gives distances between graph signals that are robust to noise. Finally, we apply this to organizing patients based on clinical notes, embedding cells modeled as signals on a gene graph, and organizing genes modeled as signals over a large cell graph. In each case, we show that UDEMD-based embeddings find accurate distances that are highly efficient compared to other methods. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.12334v2-abstract-full').style.display = 'none'; document.getElementById('2107.12334v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 5 figures, ICASSP 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.10057">arXiv:2103.10057</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2103.10057">pdf</a>, <a href="https://arxiv.org/format/2103.10057">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TNS.2021.3122452">10.1109/TNS.2021.3122452 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Immersive Operation of a Semi-Autonomous Aerial Platform for Detecting and Mapping Radiation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dayani%2C+P">P. Dayani</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%2C+N">N. 
Orr</a>, <a href="/search/cs?searchtype=author&amp;query=Thomopoulos%2C+A">A. Thomopoulos</a>, <a href="/search/cs?searchtype=author&amp;query=Saran%2C+V">V. Saran</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">S. Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+E">E. Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+N">N. Hu</a>, <a href="/search/cs?searchtype=author&amp;query=McPherson%2C+D">D. McPherson</a>, <a href="/search/cs?searchtype=author&amp;query=Menke%2C+J">J. Menke</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+A">A. Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Vetter%2C+K">K. Vetter</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.10057v1-abstract-short" style="display: inline;"> Recent advancements in radiation detection and computer vision have enabled small unmanned aerial systems (sUASs) to produce 3D nuclear radiation maps in real-time. Currently these state-of-the-art systems still require two operators: one to pilot the sUAS and another operator to monitor the detected radiation. In this work we present a system that integrates real-time 3D radiation visualization w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.10057v1-abstract-full').style.display = 'inline'; document.getElementById('2103.10057v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.10057v1-abstract-full" style="display: none;"> Recent advancements in radiation detection and computer vision have enabled small unmanned aerial systems (sUASs) to produce 3D nuclear radiation maps in real-time. 
Currently these state-of-the-art systems still require two operators: one to pilot the sUAS and another operator to monitor the detected radiation. In this work we present a system that integrates real-time 3D radiation visualization with semi-autonomous sUAS control. Our Virtual Reality interface enables a single operator to define trajectories using waypoints to abstract complex flight control and utilize the semi-autonomous maneuvering capabilities of the sUAS. The interface also displays a fused radiation visualization and environment map, thereby enabling simultaneous remote operation and radiation monitoring by a single operator. This serves as the basis for development of a single system that deploys and autonomously controls fleets of sUASs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.10057v1-abstract-full').style.display = 'none'; document.getElementById('2103.10057v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">3 pages, 2 figures. The first three authors contributed equally. 
Accepted to the 2020 IEEE Nuclear Science Symposium &amp; Medical Imaging Conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.12833">arXiv:2102.12833</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.12833">pdf</a>, <a href="https://arxiv.org/format/2102.12833">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Diffusion Earth Mover&#39;s Distance and Distribution Embeddings </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Huguet%2C+G">Guillaume Huguet</a>, <a href="/search/cs?searchtype=author&amp;query=Natik%2C+A">Amine Natik</a>, <a href="/search/cs?searchtype=author&amp;query=MacDonald%2C+K">Kincaid MacDonald</a>, <a href="/search/cs?searchtype=author&amp;query=Kuchroo%2C+M">Manik Kuchroo</a>, <a href="/search/cs?searchtype=author&amp;query=Coifman%2C+R">Ronald Coifman</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.12833v2-abstract-short" style="display: inline;"> We propose a new fast method of measuring distances between large numbers of related high dimensional datasets called the Diffusion Earth Mover&#39;s Distance (EMD). We model the datasets as distributions supported on common data graph that is derived from the affinity matrix computed on the combined data. 
In such cases where the graph is a discretization of an underlying Riemannian closed manifold, w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.12833v2-abstract-full').style.display = 'inline'; document.getElementById('2102.12833v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.12833v2-abstract-full" style="display: none;"> We propose a new fast method of measuring distances between large numbers of related high dimensional datasets called the Diffusion Earth Mover&#39;s Distance (EMD). We model the datasets as distributions supported on common data graph that is derived from the affinity matrix computed on the combined data. In such cases where the graph is a discretization of an underlying Riemannian closed manifold, we prove that Diffusion EMD is topologically equivalent to the standard EMD with a geodesic ground distance. Diffusion EMD can be computed in $\tilde{O}(n)$ time and is more accurate than similarly fast algorithms such as tree-based EMDs. We also show Diffusion EMD is fully differentiable, making it amenable to future uses in gradient-descent frameworks such as deep neural networks. Finally, we demonstrate an application of Diffusion EMD to single cell data collected from 210 COVID-19 patient samples at Yale New Haven Hospital. Here, Diffusion EMD can derive distances between patients on the manifold of cells at least two orders of magnitude faster than equally accurate methods. This distance matrix between patients can be embedded into a higher level patient manifold which uncovers structure and heterogeneity in patients. More generally, Diffusion EMD is applicable to all datasets that are massively collected in parallel in many medical and biological systems. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.12833v2-abstract-full').style.display = 'none'; document.getElementById('2102.12833v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented at ICML 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.06757">arXiv:2102.06757</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.06757">pdf</a>, <a href="https://arxiv.org/format/2102.06757">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Multimodal Data Visualization and Denoising with Integrated Diffusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kuchroo%2C+M">Manik Kuchroo</a>, <a href="/search/cs?searchtype=author&amp;query=Godavarthi%2C+A">Abhinav Godavarthi</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: 
<span class="abstract-short has-text-grey-dark mathjax" id="2102.06757v3-abstract-short" style="display: inline;"> We propose a method called integrated diffusion for combining multimodal datasets, or data gathered via several different measurements on the same system, to create a joint data diffusion operator. As real world data suffers from both local and global noise, we introduce mechanisms to optimally calculate a diffusion operator that reflects the combined information from both modalities. We show the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.06757v3-abstract-full').style.display = 'inline'; document.getElementById('2102.06757v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.06757v3-abstract-full" style="display: none;"> We propose a method called integrated diffusion for combining multimodal datasets, or data gathered via several different measurements on the same system, to create a joint data diffusion operator. As real world data suffers from both local and global noise, we introduce mechanisms to optimally calculate a diffusion operator that reflects the combined information from both modalities. We show the utility of this joint operator in data denoising, visualization and clustering, performing better than other methods to integrate and analyze multimodal data. We apply our method to multi-omic data generated from blood cells, measuring both gene expression and chromatin accessibility. Our approach better visualizes the geometry of the joint data, captures known cross-modality associations and identifies known cellular populations. More generally, integrated diffusion is broadly applicable to multimodal datasets generated in many medical and biological systems. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.06757v3-abstract-full').style.display = 'none'; document.getElementById('2102.06757v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.00485">arXiv:2102.00485</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.00485">pdf</a>, <a href="https://arxiv.org/format/2102.00485">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Exploring the Geometry and Topology of Neural Network Loss Landscapes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Horoi%2C+S">Stefan Horoi</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+J">Jessie Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Rieck%2C+B">Bastian Rieck</a>, <a href="/search/cs?searchtype=author&amp;query=Lajoie%2C+G">Guillaume Lajoie</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.00485v2-abstract-short" style="display: inline;"> 
Recent work has established clear links between the generalization performance of trained neural networks and the geometry of their loss landscape near the local minima to which they converge. This suggests that qualitative and quantitative examination of the loss landscape geometry could yield insights about neural network generalization performance during training. To this end, researchers have&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.00485v2-abstract-full').style.display = 'inline'; document.getElementById('2102.00485v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.00485v2-abstract-full" style="display: none;"> Recent work has established clear links between the generalization performance of trained neural networks and the geometry of their loss landscape near the local minima to which they converge. This suggests that qualitative and quantitative examination of the loss landscape geometry could yield insights about neural network generalization performance during training. To this end, researchers have proposed visualizing the loss landscape through the use of simple dimensionality reduction techniques. However, such visualization methods have been limited by their linear nature and only capture features in one or two dimensions, thus restricting sampling of the loss landscape to lines or planes. Here, we expand and improve upon these in three ways. First, we present a novel &#34;jump and retrain&#34; procedure for sampling relevant portions of the loss landscape. We show that the resulting sampled data holds more meaningful information about the network&#39;s ability to generalize. Next, we show that non-linear dimensionality reduction of the jump and retrain trajectories via PHATE, a trajectory and manifold-preserving method, allows us to visualize differences between networks that are generalizing well vs poorly. 
Finally, we combine PHATE trajectories with a computational homology characterization to quantify trajectory differences. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.00485v2-abstract-full').style.display = 'none'; document.getElementById('2102.00485v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at the 20th Symposium on Intelligent Data Analysis (IDA) 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.02415">arXiv:2010.02415</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2010.02415">pdf</a>, <a href="https://arxiv.org/format/2010.02415">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Data-Driven Learning of Geometric Scattering Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Wenkel%2C+F">Frederik Wenkel</a>, <a href="/search/cs?searchtype=author&amp;query=MacDonald%2C+K">Kincaid MacDonald</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a 
href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.02415v3-abstract-short" style="display: inline;"> We propose a new graph neural network (GNN) module, based on relaxations of recently proposed geometric scattering transforms, which consist of a cascade of graph wavelet filters. Our learnable geometric scattering (LEGS) module enables adaptive tuning of the wavelets to encourage band-pass features to emerge in learned representations. The incorporation of our LEGS-module in GNNs enables the lear&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.02415v3-abstract-full').style.display = 'inline'; document.getElementById('2010.02415v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.02415v3-abstract-full" style="display: none;"> We propose a new graph neural network (GNN) module, based on relaxations of recently proposed geometric scattering transforms, which consist of a cascade of graph wavelet filters. Our learnable geometric scattering (LEGS) module enables adaptive tuning of the wavelets to encourage band-pass features to emerge in learned representations. The incorporation of our LEGS-module in GNNs enables the learning of longer-range graph relations compared to many popular GNNs, which often rely on encoding graph structure via smoothness or similarity between neighbors. Further, its wavelet priors result in simplified architectures with significantly fewer learned parameters compared to competing GNNs. We demonstrate the predictive performance of LEGS-based networks on graph classification benchmarks, as well as the descriptive quality of their learned features in biochemical graph data exploration tasks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.02415v3-abstract-full').style.display = 'none'; document.getElementById('2010.02415v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 2 figures, 3 tables, Presented at IEEE MLSP 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.13291">arXiv:2006.13291</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.13291">pdf</a>, <a href="https://arxiv.org/format/2006.13291">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Image-to-image Mapping with Many Domains by Sparse Attribute Transfer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Amodio%2C+M">Matthew Amodio</a>, <a href="/search/cs?searchtype=author&amp;query=Assouel%2C+R">Rim Assouel</a>, <a href="/search/cs?searchtype=author&amp;query=Schmidt%2C+V">Victor Schmidt</a>, <a href="/search/cs?searchtype=author&amp;query=Sylvain%2C+T">Tristan Sylvain</a>, <a 
href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Bengio%2C+Y">Yoshua Bengio</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.13291v1-abstract-short" style="display: inline;"> Unsupervised image-to-image translation consists of learning a pair of mappings between two domains without known pairwise correspondences between points. The current convention is to approach this task with cycle-consistent GANs: using a discriminator to encourage the generator to change the image to match the target domain, while training the generator to be inverted with another mapping. While&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.13291v1-abstract-full').style.display = 'inline'; document.getElementById('2006.13291v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.13291v1-abstract-full" style="display: none;"> Unsupervised image-to-image translation consists of learning a pair of mappings between two domains without known pairwise correspondences between points. The current convention is to approach this task with cycle-consistent GANs: using a discriminator to encourage the generator to change the image to match the target domain, while training the generator to be inverted with another mapping. While ending up with paired inverse functions may be a good end result, enforcing this restriction at all times during training can be a hindrance to effective modeling. We propose an alternate approach that directly restricts the generator to performing a simple sparse transformation in a latent layer, motivated by recent work from cognitive neuroscience suggesting an architectural prior on representations corresponding to consciousness. 
Our biologically motivated approach leads to representations more amenable to transformation by disentangling high-level abstract concepts in the latent space. We demonstrate that image-to-image domain translation with many different domains can be learned more effectively with our architecturally constrained, simple transformation than with previous unconstrained architectures that rely on a cycle-consistency loss. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.13291v1-abstract-full').style.display = 'none'; document.getElementById('2006.13291v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.07882">arXiv:2006.07882</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.07882">pdf</a>, <a href="https://arxiv.org/format/2006.07882">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Algebraic Topology">math.AT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Uncovering the Topology of Time-Varying fMRI Data using Cubical Persistence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Rieck%2C+B">Bastian Rieck</a>, <a href="/search/cs?searchtype=author&amp;query=Yates%2C+T">Tristan Yates</a>, <a href="/search/cs?searchtype=author&amp;query=Bock%2C+C">Christian Bock</a>, <a href="/search/cs?searchtype=author&amp;query=Borgwardt%2C+K">Karsten Borgwardt</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Turk-Browne%2C+N">Nicholas Turk-Browne</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.07882v2-abstract-short" style="display: inline;"> Functional magnetic resonance imaging (fMRI) is a crucial technology for gaining insights into cognitive processes in humans. Data amassed from fMRI measurements result in volumetric data sets that vary over time. However, analysing such data presents a challenge due to the large degree of noise and person-to-person variation in how information is represented in the brain. To address this challeng&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.07882v2-abstract-full').style.display = 'inline'; document.getElementById('2006.07882v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.07882v2-abstract-full" style="display: none;"> Functional magnetic resonance imaging (fMRI) is a crucial technology for gaining insights into cognitive processes in humans. Data amassed from fMRI measurements result in volumetric data sets that vary over time. However, analysing such data presents a challenge due to the large degree of noise and person-to-person variation in how information is represented in the brain. 
To address this challenge, we present a novel topological approach that encodes each time point in an fMRI data set as a persistence diagram of topological features, i.e. high-dimensional voids present in the data. This representation naturally does not rely on voxel-by-voxel correspondence and is robust to noise. We show that these time-varying persistence diagrams can be clustered to find meaningful groupings between participants, and that they are also useful in studying within-subject brain state trajectories of subjects performing a particular task. Here, we apply both clustering and trajectory analysis techniques to a group of participants watching the movie &#39;Partly Cloudy&#39;. We observe significant differences in both brain state trajectories and overall topological activity between adults and children watching the same movie. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.07882v2-abstract-full').style.display = 'none'; document.getElementById('2006.07882v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at the Conference on Neural Information Processing Systems (NeurIPS) 2020; camera-ready version</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.06885">arXiv:2006.06885</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.06885">pdf</a>, <a href="https://arxiv.org/format/2006.06885">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Uncovering the Folding Landscape of RNA Secondary Structure with Deep Graph Embeddings </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Castro%2C+E">Egbert Castro</a>, <a href="/search/cs?searchtype=author&amp;query=Benz%2C+A">Andrew Benz</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.06885v3-abstract-short" style="display: inline;"> Biomolecular graph analysis has recently gained much attention in the emerging field of geometric deep learning. Here we focus on organizing biomolecular graphs in ways that expose meaningful relations and variations between them. We propose a geometric scattering autoencoder (GSAE) network for learning such graph embeddings. 
Our embedding network first extracts rich graph features using the recen&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.06885v3-abstract-full').style.display = 'inline'; document.getElementById('2006.06885v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.06885v3-abstract-full" style="display: none;"> Biomolecular graph analysis has recently gained much attention in the emerging field of geometric deep learning. Here we focus on organizing biomolecular graphs in ways that expose meaningful relations and variations between them. We propose a geometric scattering autoencoder (GSAE) network for learning such graph embeddings. Our embedding network first extracts rich graph features using the recently proposed geometric scattering transform. Then, it leverages a semi-supervised variational autoencoder to extract a low-dimensional embedding that retains the information in these features that enable prediction of molecular properties as well as characterize graphs. We show that GSAE organizes RNA graphs both by structure and energy, accurately reflecting bistable RNA structures. Also, the model is generative and can sample new folding trajectories. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.06885v3-abstract-full').style.display = 'none'; document.getElementById('2006.06885v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 10 figures, 4 tables, Presented at IEEE Big Data 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.09381">arXiv:2003.09381</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.09381">pdf</a>, <a href="https://arxiv.org/format/2003.09381">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> The application of $σ$-LFSR in Key-Dependent Feedback Configuration for Word-Oriented Stream Ciphers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nandi%2C+S">Subrata Nandi</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Srinivasan Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Zolfaghari%2C+B">Behrouz Zolfaghari</a>, <a href="/search/cs?searchtype=author&amp;query=Mitra%2C+P">Pinaki Mitra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.09381v2-abstract-short" style="display: inline;"> In this paper, we propose and evaluate a method for generating key-dependent feedback configurations (KDFC) for $σ$-LFSRs. $σ$-LFSRs with such configurations can be applied to any stream cipher that uses a word-based LFSR. Here, a configuration generation algorithm uses the secret key(K) and the initialization vector (IV) to generate a feedback configuration. 
We have mathematically analysed the fe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.09381v2-abstract-full').style.display = 'inline'; document.getElementById('2003.09381v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.09381v2-abstract-full" style="display: none;"> In this paper, we propose and evaluate a method for generating key-dependent feedback configurations (KDFC) for $σ$-LFSRs. $σ$-LFSRs with such configurations can be applied to any stream cipher that uses a word-based LFSR. Here, a configuration generation algorithm uses the secret key(K) and the initialization vector (IV) to generate a feedback configuration. We have mathematically analysed the feedback configurations generated by this method. As a test case, we have applied this method on SNOW 2.0 and have studied its impact on resistance to various attacks. Further, we have also tested the generated keystream for randomness and have briefly described its implementation and the challenges involved in the same. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.09381v2-abstract-full').style.display = 'none'; document.getElementById('2003.09381v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.04461">arXiv:2002.04461</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2002.04461">pdf</a>, <a href="https://arxiv.org/format/2002.04461">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> TrajectoryNet: A Dynamic Optimal Transport Network for Modeling Cellular Dynamics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tong%2C+A">Alexander Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+J">Jessie Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=van+Dijk%2C+D">David van Dijk</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.04461v2-abstract-short" style="display: inline;"> It is increasingly common to encounter data from dynamic processes captured by static cross-sectional measurements over time, particularly in biomedical settings. Recent attempts to model individual trajectories from this data use optimal transport to create pairwise matchings between time points. 
However, these methods cannot model continuous dynamics and non-linear paths that entities can take i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.04461v2-abstract-full').style.display = 'inline'; document.getElementById('2002.04461v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.04461v2-abstract-full" style="display: none;"> It is increasingly common to encounter data from dynamic processes captured by static cross-sectional measurements over time, particularly in biomedical settings. Recent attempts to model individual trajectories from this data use optimal transport to create pairwise matchings between time points. However, these methods cannot model continuous dynamics and non-linear paths that entities can take in these systems. To address this issue, we establish a link between continuous normalizing flows and dynamic optimal transport, that allows us to model the expected paths of points over time. Continuous normalizing flows are generally under constrained, as they are allowed to take an arbitrary path from the source to the target distribution. We present TrajectoryNet, which controls the continuous paths taken between distributions to produce dynamic optimal transport. We show how this is particularly applicable for studying cellular dynamics in data from single-cell RNA sequencing (scRNA-seq) technologies, and that TrajectoryNet improves upon recently proposed static optimal transport-based models that can be used for interpolating cellular distributions. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.04461v2-abstract-full').style.display = 'none'; document.getElementById('2002.04461v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented at ICML 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.03847">arXiv:2002.03847</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2002.03847">pdf</a>, <a href="https://arxiv.org/format/2002.03847">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Making Logic Learnable With Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Brudermueller%2C+T">Tobias Brudermueller</a>, <a href="/search/cs?searchtype=author&amp;query=Shung%2C+D+L">Dennis L. Shung</a>, <a href="/search/cs?searchtype=author&amp;query=Stanley%2C+A+J">Adrian J. 
Stanley</a>, <a href="/search/cs?searchtype=author&amp;query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.03847v3-abstract-short" style="display: inline;"> While neural networks are good at learning unspecified functions from training samples, they cannot be directly implemented in hardware and are often not interpretable or formally verifiable. On the other hand, logic circuits are implementable, verifiable, and interpretable but are not able to learn from training data in a generalizable way. We propose a novel logic learning pipeline that combines&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.03847v3-abstract-full').style.display = 'inline'; document.getElementById('2002.03847v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.03847v3-abstract-full" style="display: none;"> While neural networks are good at learning unspecified functions from training samples, they cannot be directly implemented in hardware and are often not interpretable or formally verifiable. On the other hand, logic circuits are implementable, verifiable, and interpretable but are not able to learn from training data in a generalizable way. We propose a novel logic learning pipeline that combines the advantages of neural networks and logic circuits. Our pipeline first trains a neural network on a classification task, and then translates this, first to random forests, and then to AND-Inverter logic. We show that our pipeline maintains greater accuracy than naive translations to logic, and minimizes the logic such that it is more interpretable and has decreased hardware cost. 
We show the utility of our pipeline on a network that is trained on biomedical data. This approach could be applied to patient care to provide risk stratification and guide clinical decision-making. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.03847v3-abstract-full').style.display = 'none'; document.getElementById('2002.03847v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1910.06270">arXiv:1910.06270</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1910.06270">pdf</a>, <a href="https://arxiv.org/format/1910.06270">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Fully Homomorphic Encryption based on Multivariate Polynomial Evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dowerah%2C+U">Uddipana Dowerah</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Srinivasan Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1910.06270v2-abstract-short" style="display: inline;"> We propose a multi-bit leveled fully homomorphic encryption scheme using multivariate polynomial evaluations. 
The security of the scheme depends on the hardness of the Learning with Errors (LWE) problem. For homomorphic multiplication, the scheme uses a polynomial based technique that does not require relinearization (and key switching). The noise associated with the ciphertext increases only line&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.06270v2-abstract-full').style.display = 'inline'; document.getElementById('1910.06270v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1910.06270v2-abstract-full" style="display: none;"> We propose a multi-bit leveled fully homomorphic encryption scheme using multivariate polynomial evaluations. The security of the scheme depends on the hardness of the Learning with Errors (LWE) problem. For homomorphic multiplication, the scheme uses a polynomial based technique that does not require relinearization (and key switching). The noise associated with the ciphertext increases only linearly with every multiplication. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.06270v2-abstract-full').style.display = 'none'; document.getElementById('1910.06270v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2019. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1908.02831">arXiv:1908.02831</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1908.02831">pdf</a>, <a href="https://arxiv.org/format/1908.02831">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Visualizing the PHATE of Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gigante%2C+S">Scott Gigante</a>, <a href="/search/cs?searchtype=author&amp;query=Charles%2C+A+S">Adam S. Charles</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a>, <a href="/search/cs?searchtype=author&amp;query=Mishne%2C+G">Gal Mishne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1908.02831v1-abstract-short" style="display: inline;"> Understanding why and how certain neural networks outperform others is key to guiding future development of network architectures and optimization methods. 
To this end, we introduce a novel visualization algorithm that reveals the internal geometry of such networks: Multislice PHATE (M-PHATE), the first method designed explicitly to visualize how a neural network&#39;s hidden representations of data e&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.02831v1-abstract-full').style.display = 'inline'; document.getElementById('1908.02831v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1908.02831v1-abstract-full" style="display: none;"> Understanding why and how certain neural networks outperform others is key to guiding future development of network architectures and optimization methods. To this end, we introduce a novel visualization algorithm that reveals the internal geometry of such networks: Multislice PHATE (M-PHATE), the first method designed explicitly to visualize how a neural network&#39;s hidden representations of data evolve throughout the course of training. We demonstrate that our visualization provides intuitive, detailed summaries of the learning dynamics beyond simple global measures (i.e., validation loss and accuracy), without the need to access validation data. Furthermore, M-PHATE better captures both the dynamics and community structure of the hidden units as compared to visualization based on standard dimensionality reduction methods (e.g., ISOMAP, t-SNE). We demonstrate M-PHATE with two vignettes: continual learning and generalization. In the former, the M-PHATE visualizations display the mechanism of &#34;catastrophic forgetting&#34; which is a major challenge for learning in task-switching contexts. In the latter, our visualizations reveal how increased heterogeneity among hidden units correlates with improved generalization performance. 
An implementation of M-PHATE, along with scripts to reproduce the figures in this paper, is available at https://github.com/scottgigante/M-PHATE. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.02831v1-abstract-full').style.display = 'none'; document.getElementById('1908.02831v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Neural Information Processing Systems (2019) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.04463">arXiv:1907.04463</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1907.04463">pdf</a>, <a href="https://arxiv.org/format/1907.04463">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/BigData47090.2019.9006013">10.1109/BigData47090.2019.9006013 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Coarse Graining of Data via 
Inhomogeneous Diffusion Condensation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Brugnone%2C+N">Nathan Brugnone</a>, <a href="/search/cs?searchtype=author&amp;query=Gonopolskiy%2C+A">Alex Gonopolskiy</a>, <a href="/search/cs?searchtype=author&amp;query=Moyle%2C+M+W">Mark W. Moyle</a>, <a href="/search/cs?searchtype=author&amp;query=Kuchroo%2C+M">Manik Kuchroo</a>, <a href="/search/cs?searchtype=author&amp;query=van+Dijk%2C+D">David van Dijk</a>, <a href="/search/cs?searchtype=author&amp;query=Moon%2C+K+R">Kevin R. Moon</a>, <a href="/search/cs?searchtype=author&amp;query=Colon-Ramos%2C+D">Daniel Colon-Ramos</a>, <a href="/search/cs?searchtype=author&amp;query=Wolf%2C+G">Guy Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Hirn%2C+M+J">Matthew J. Hirn</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.04463v3-abstract-short" style="display: inline;"> Big data often has emergent structure that exists at multiple levels of abstraction, which are useful for characterizing complex interactions and dynamics of the observations. Here, we consider multiple levels of abstraction via a multiresolution geometry of data points at different granularities. 
To construct this geometry we define a time-inhomogeneous diffusion process that effectively condense&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.04463v3-abstract-full').style.display = 'inline'; document.getElementById('1907.04463v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.04463v3-abstract-full" style="display: none;"> Big data often has emergent structure that exists at multiple levels of abstraction, which are useful for characterizing complex interactions and dynamics of the observations. Here, we consider multiple levels of abstraction via a multiresolution geometry of data points at different granularities. To construct this geometry we define a time-inhomogeneous diffusion process that effectively condenses data points together to uncover nested groupings at larger and larger granularities. This inhomogeneous process creates a deep cascade of intrinsic low pass filters on the data affinity graph that are applied in sequence to gradually eliminate local variability while adjusting the learned data geometry to increasingly coarser resolutions. We provide visualizations to exhibit our method as a continuously-hierarchical clustering with directions of eliminated variation highlighted at each step. The utility of our algorithm is demonstrated via neuronal data condensation, where the constructed multiresolution data geometry uncovers the organization, grouping, and connectivity between neurons. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.04463v3-abstract-full').style.display = 'none'; document.getElementById('1907.04463v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 7 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.5.3 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings of the 2019 IEEE International Conference on Big Data, pages 2624-2633, 2019 </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Krishnaswamy%2C+S&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Krishnaswamy%2C+S&amp;start=0" class="pagination-link is-current" aria-label="Page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Krishnaswamy%2C+S&amp;start=50" class="pagination-link" aria-label="Page 2">2 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns 
is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a 
href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 
21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10