<!-- Saved-page snapshot (source marker: CINXE.COM; page title: "Search | arXiv e-print repository"). Stray pre-doctype text moved into this comment: non-comment content before <!DOCTYPE html> is invalid HTML. -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;21 of 21 results for author: <span class="mathjax">Daw, A</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Daw%2C+A">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Daw, A"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Daw%2C+A&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Daw, A"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13553">arXiv:2411.13553</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13553">pdf</a>, <a href="https://arxiv.org/format/2411.13553">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> AI-generated Image Detection: Passive or Watermark? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Guo%2C+M">Moyang Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Y">Yuepeng Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhengyuan Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zeyu Li</a>, <a href="/search/cs?searchtype=author&amp;query=Sadovnik%2C+A">Amir Sadovnik</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+N">Neil Gong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13553v1-abstract-short" style="display: inline;"> While text-to-image models offer numerous benefits, they also pose significant societal risks. Detecting AI-generated images is crucial for mitigating these risks. Detection methods can be broadly categorized into passive and watermark-based approaches: passive detectors rely on artifacts present in AI-generated images, whereas watermark-based detectors proactively embed watermarks into such image&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13553v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13553v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13553v1-abstract-full" style="display: none;"> While text-to-image models offer numerous benefits, they also pose significant societal risks. Detecting AI-generated images is crucial for mitigating these risks. Detection methods can be broadly categorized into passive and watermark-based approaches: passive detectors rely on artifacts present in AI-generated images, whereas watermark-based detectors proactively embed watermarks into such images. 
A key question is which type of detector performs better in terms of effectiveness, robustness, and efficiency. However, the current literature lacks a comprehensive understanding of this issue. In this work, we aim to bridge that gap by developing ImageDetectBench, the first comprehensive benchmark to compare the effectiveness, robustness, and efficiency of passive and watermark-based detectors. Our benchmark includes four datasets, each containing a mix of AI-generated and non-AI-generated images. We evaluate five passive detectors and four watermark-based detectors against eight types of common perturbations and three types of adversarial perturbations. Our benchmark results reveal several interesting findings. For instance, watermark-based detectors consistently outperform passive detectors, both in the presence and absence of perturbations. Based on these insights, we provide recommendations for detecting AI-generated images, e.g., when both types of detectors are applicable, watermark-based detectors should be the preferred choice. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13553v1-abstract-full').style.display = 'none'; document.getElementById('2411.13553v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.13010">arXiv:2410.13010</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.13010">pdf</a>, <a href="https://arxiv.org/format/2410.13010">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Hiding-in-Plain-Sight (HiPS) Attack on CLIP for Targetted Object Removal from Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Chung%2C+M+H">Megan Hong-Thanh Chung</a>, <a href="/search/cs?searchtype=author&amp;query=Mahbub%2C+M">Maria Mahbub</a>, <a href="/search/cs?searchtype=author&amp;query=Sadovnik%2C+A">Amir Sadovnik</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.13010v1-abstract-short" style="display: inline;"> Machine learning models are known to be vulnerable to adversarial attacks, but traditional attacks have mostly focused on single-modalities. With the rise of large multi-modal models (LMMs) like CLIP, which combine vision and language capabilities, new vulnerabilities have emerged. 
However, prior work in multimodal targeted attacks aim to completely change the model&#39;s output to what the adversary&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.13010v1-abstract-full').style.display = 'inline'; document.getElementById('2410.13010v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.13010v1-abstract-full" style="display: none;"> Machine learning models are known to be vulnerable to adversarial attacks, but traditional attacks have mostly focused on single-modalities. With the rise of large multi-modal models (LMMs) like CLIP, which combine vision and language capabilities, new vulnerabilities have emerged. However, prior work in multimodal targeted attacks aim to completely change the model&#39;s output to what the adversary wants. In many realistic scenarios, an adversary might seek to make only subtle modifications to the output, so that the changes go unnoticed by downstream models or even by humans. We introduce Hiding-in-Plain-Sight (HiPS) attacks, a novel class of adversarial attacks that subtly modifies model predictions by selectively concealing target object(s), as if the target object was absent from the scene. We propose two HiPS attack variants, HiPS-cls and HiPS-cap, and demonstrate their effectiveness in transferring to downstream image captioning models, such as CLIP-Cap, for targeted object removal from image captions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.13010v1-abstract-full').style.display = 'none'; document.getElementById('2410.13010v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in the 3rd Workshop on New Frontiers in Adversarial Machine Learning at NeurIPS 2024. 10 pages, 7 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.11247">arXiv:2410.11247</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.11247">pdf</a>, <a href="https://arxiv.org/format/2410.11247">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Mathematical Physics">math-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Geophysics">physics.geo-ph</span> </div> </div> <p class="title is-5 mathjax"> A Unified Framework for Forward and Inverse Problems in Subsurface Imaging using Latent Space Translations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+N">Naveen Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Sawhney%2C+M">Medha Sawhney</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Y">Youzuo Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.11247v2-abstract-short" style="display: inline;"> In subsurface imaging, learning the mapping from velocity maps to seismic waveforms (forward problem) and waveforms to velocity (inverse problem) is important for several applications. 
While traditional techniques for solving forward and inverse problems are computationally prohibitive, there is a growing interest in leveraging recent advances in deep learning to learn the mapping between velocity&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11247v2-abstract-full').style.display = 'inline'; document.getElementById('2410.11247v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.11247v2-abstract-full" style="display: none;"> In subsurface imaging, learning the mapping from velocity maps to seismic waveforms (forward problem) and waveforms to velocity (inverse problem) is important for several applications. While traditional techniques for solving forward and inverse problems are computationally prohibitive, there is a growing interest in leveraging recent advances in deep learning to learn the mapping between velocity maps and seismic waveform images directly from data. Despite the variety of architectures explored in previous works, several open questions still remain unanswered such as the effect of latent space sizes, the importance of manifold learning, the complexity of translation models, and the value of jointly solving forward and inverse problems. We propose a unified framework to systematically characterize prior research in this area termed the Generalized Forward-Inverse (GFI) framework, building on the assumption of manifolds and latent space translations. We show that GFI encompasses previous works in deep learning for subsurface imaging, which can be viewed as specific instantiations of GFI. We also propose two new model architectures within the framework of GFI: Latent U-Net and Invertible X-Net, leveraging the power of U-Nets for domain translation and the ability of IU-Nets to simultaneously learn forward and inverse translations, respectively. 
We show that our proposed models achieve state-of-the-art (SOTA) performance for forward and inverse problems on a wide range of synthetic datasets, and also investigate their zero-shot effectiveness on two real-world-like datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11247v2-abstract-full').style.display = 'none'; document.getElementById('2410.11247v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.02335">arXiv:2409.02335</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.02335">pdf</a>, <a href="https://arxiv.org/format/2409.02335">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> What Do You See in Common? Learning Hierarchical Prototypes over Tree-of-Life to Discover Evolutionary Traits </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Manogaran%2C+H+B">Harish Babu Manogaran</a>, <a href="/search/cs?searchtype=author&amp;query=Maruf%2C+M">M. Maruf</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Mehrab%2C+K+S">Kazi Sajeed Mehrab</a>, <a href="/search/cs?searchtype=author&amp;query=Charpentier%2C+C+P">Caleb Patrick Charpentier</a>, <a href="/search/cs?searchtype=author&amp;query=Uyeda%2C+J+C">Josef C. 
Uyeda</a>, <a href="/search/cs?searchtype=author&amp;query=Dahdul%2C+W">Wasila Dahdul</a>, <a href="/search/cs?searchtype=author&amp;query=Thompson%2C+M+J">Matthew J Thompson</a>, <a href="/search/cs?searchtype=author&amp;query=Campolongo%2C+E+G">Elizabeth G Campolongo</a>, <a href="/search/cs?searchtype=author&amp;query=Provost%2C+K+L">Kaiya L Provost</a>, <a href="/search/cs?searchtype=author&amp;query=Mabee%2C+P+M">Paula M. Mabee</a>, <a href="/search/cs?searchtype=author&amp;query=Lapp%2C+H">Hilmar Lapp</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.02335v1-abstract-short" style="display: inline;"> A grand challenge in biology is to discover evolutionary traits - features of organisms common to a group of species with a shared ancestor in the tree of life (also referred to as phylogenetic tree). With the growing availability of image repositories in biology, there is a tremendous opportunity to discover evolutionary traits directly from images in the form of a hierarchy of prototypes. Howeve&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.02335v1-abstract-full').style.display = 'inline'; document.getElementById('2409.02335v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.02335v1-abstract-full" style="display: none;"> A grand challenge in biology is to discover evolutionary traits - features of organisms common to a group of species with a shared ancestor in the tree of life (also referred to as phylogenetic tree). With the growing availability of image repositories in biology, there is a tremendous opportunity to discover evolutionary traits directly from images in the form of a hierarchy of prototypes. 
However, current prototype-based methods are mostly designed to operate over a flat structure of classes and face several challenges in discovering hierarchical prototypes, including the issue of learning over-specific features at internal nodes. To overcome these challenges, we introduce the framework of Hierarchy aligned Commonality through Prototypical Networks (HComP-Net). We empirically show that HComP-Net learns prototypes that are accurate, semantically consistent, and generalizable to unseen species in comparison to baselines on birds, butterflies, and fishes datasets. The code and datasets are available at https://github.com/Imageomics/HComPNet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.02335v1-abstract-full').style.display = 'none'; document.getElementById('2409.02335v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">34 pages, 27 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.16176">arXiv:2408.16176</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.16176">pdf</a>, <a href="https://arxiv.org/format/2408.16176">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> VLM4Bio: A Benchmark Dataset to Evaluate Pretrained Vision-Language Models for Trait Discovery from Biological Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maruf%2C+M">M. Maruf</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Mehrab%2C+K+S">Kazi Sajeed Mehrab</a>, <a href="/search/cs?searchtype=author&amp;query=Manogaran%2C+H+B">Harish Babu Manogaran</a>, <a href="/search/cs?searchtype=author&amp;query=Neog%2C+A">Abhilash Neog</a>, <a href="/search/cs?searchtype=author&amp;query=Sawhney%2C+M">Medha Sawhney</a>, <a href="/search/cs?searchtype=author&amp;query=Khurana%2C+M">Mridul Khurana</a>, <a href="/search/cs?searchtype=author&amp;query=Balhoff%2C+J+P">James P. Balhoff</a>, <a href="/search/cs?searchtype=author&amp;query=Bakis%2C+Y">Yasin Bakis</a>, <a href="/search/cs?searchtype=author&amp;query=Altintas%2C+B">Bahadir Altintas</a>, <a href="/search/cs?searchtype=author&amp;query=Thompson%2C+M+J">Matthew J. Thompson</a>, <a href="/search/cs?searchtype=author&amp;query=Campolongo%2C+E+G">Elizabeth G. Campolongo</a>, <a href="/search/cs?searchtype=author&amp;query=Uyeda%2C+J+C">Josef C. 
Uyeda</a>, <a href="/search/cs?searchtype=author&amp;query=Lapp%2C+H">Hilmar Lapp</a>, <a href="/search/cs?searchtype=author&amp;query=Bart%2C+H+L">Henry L. Bart</a>, <a href="/search/cs?searchtype=author&amp;query=Mabee%2C+P+M">Paula M. Mabee</a>, <a href="/search/cs?searchtype=author&amp;query=Su%2C+Y">Yu Su</a>, <a href="/search/cs?searchtype=author&amp;query=Chao%2C+W">Wei-Lun Chao</a>, <a href="/search/cs?searchtype=author&amp;query=Stewart%2C+C">Charles Stewart</a>, <a href="/search/cs?searchtype=author&amp;query=Berger-Wolf%2C+T">Tanya Berger-Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Dahdul%2C+W">Wasila Dahdul</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.16176v1-abstract-short" style="display: inline;"> Images are increasingly becoming the currency for documenting biodiversity on the planet, providing novel opportunities for accelerating scientific discoveries in the field of organismal biology, especially with the advent of large vision-language models (VLMs). We ask if pre-trained VLMs can aid scientists in answering a range of biologically relevant questions without any additional fine-tuning.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.16176v1-abstract-full').style.display = 'inline'; document.getElementById('2408.16176v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.16176v1-abstract-full" style="display: none;"> Images are increasingly becoming the currency for documenting biodiversity on the planet, providing novel opportunities for accelerating scientific discoveries in the field of organismal biology, especially with the advent of large vision-language models (VLMs). 
We ask if pre-trained VLMs can aid scientists in answering a range of biologically relevant questions without any additional fine-tuning. In this paper, we evaluate the effectiveness of 12 state-of-the-art (SOTA) VLMs in the field of organismal biology using a novel dataset, VLM4Bio, consisting of 469K question-answer pairs involving 30K images from three groups of organisms: fishes, birds, and butterflies, covering five biologically relevant tasks. We also explore the effects of applying prompting techniques and tests for reasoning hallucination on the performance of VLMs, shedding new light on the capabilities of current SOTA VLMs in answering biologically relevant questions using images. The code and datasets for running all the analyses reported in this paper can be found at https://github.com/sammarfy/VLM4Bio. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.16176v1-abstract-full').style.display = 'none'; document.getElementById('2408.16176v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">36 pages, 37 figures, 7 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.00160">arXiv:2408.00160</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.00160">pdf</a>, <a href="https://arxiv.org/format/2408.00160">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Populations and Evolution">q-bio.PE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Hierarchical Conditioning of Diffusion Models Using Tree-of-Life for Studying Species Evolution </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Khurana%2C+M">Mridul Khurana</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Maruf%2C+M">M. Maruf</a>, <a href="/search/cs?searchtype=author&amp;query=Uyeda%2C+J+C">Josef C. Uyeda</a>, <a href="/search/cs?searchtype=author&amp;query=Dahdul%2C+W">Wasila Dahdul</a>, <a href="/search/cs?searchtype=author&amp;query=Charpentier%2C+C">Caleb Charpentier</a>, <a href="/search/cs?searchtype=author&amp;query=Bak%C4%B1%C5%9F%2C+Y">Yasin Bakış</a>, <a href="/search/cs?searchtype=author&amp;query=Bart%2C+H+L">Henry L. Bart Jr.</a>, <a href="/search/cs?searchtype=author&amp;query=Mabee%2C+P+M">Paula M. Mabee</a>, <a href="/search/cs?searchtype=author&amp;query=Lapp%2C+H">Hilmar Lapp</a>, <a href="/search/cs?searchtype=author&amp;query=Balhoff%2C+J+P">James P. 
Balhoff</a>, <a href="/search/cs?searchtype=author&amp;query=Chao%2C+W">Wei-Lun Chao</a>, <a href="/search/cs?searchtype=author&amp;query=Stewart%2C+C">Charles Stewart</a>, <a href="/search/cs?searchtype=author&amp;query=Berger-Wolf%2C+T">Tanya Berger-Wolf</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.00160v1-abstract-short" style="display: inline;"> A central problem in biology is to understand how organisms evolve and adapt to their environment by acquiring variations in the observable characteristics or traits of species across the tree of life. With the growing availability of large-scale image repositories in biology and recent advances in generative modeling, there is an opportunity to accelerate the discovery of evolutionary traits auto&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.00160v1-abstract-full').style.display = 'inline'; document.getElementById('2408.00160v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.00160v1-abstract-full" style="display: none;"> A central problem in biology is to understand how organisms evolve and adapt to their environment by acquiring variations in the observable characteristics or traits of species across the tree of life. With the growing availability of large-scale image repositories in biology and recent advances in generative modeling, there is an opportunity to accelerate the discovery of evolutionary traits automatically from images. Toward this goal, we introduce Phylo-Diffusion, a novel framework for conditioning diffusion models with phylogenetic knowledge represented in the form of HIERarchical Embeddings (HIER-Embeds). 
We also propose two new experiments for perturbing the embedding space of Phylo-Diffusion: trait masking and trait swapping, inspired by counterpart experiments of gene knockout and gene editing/swapping. Our work represents a novel methodological advance in generative modeling to structure the embedding space of diffusion models using tree-based knowledge. Our work also opens a new chapter of research in evolutionary biology by using generative models to visualize evolutionary changes directly from images. We empirically demonstrate the usefulness of Phylo-Diffusion in capturing meaningful trait variations for fishes and birds, revealing novel insights about the biological mechanisms of their evolution. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.00160v1-abstract-full').style.display = 'none'; document.getElementById('2408.00160v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.08027">arXiv:2407.08027</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.08027">pdf</a>, <a href="https://arxiv.org/format/2407.08027">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Fish-Vista: A Multi-Purpose Dataset for Understanding &amp; Identification of Traits from Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mehrab%2C+K+S">Kazi Sajeed Mehrab</a>, <a href="/search/cs?searchtype=author&amp;query=Maruf%2C+M">M. 
Maruf</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Manogaran%2C+H+B">Harish Babu Manogaran</a>, <a href="/search/cs?searchtype=author&amp;query=Neog%2C+A">Abhilash Neog</a>, <a href="/search/cs?searchtype=author&amp;query=Khurana%2C+M">Mridul Khurana</a>, <a href="/search/cs?searchtype=author&amp;query=Altintas%2C+B">Bahadir Altintas</a>, <a href="/search/cs?searchtype=author&amp;query=Bakis%2C+Y">Yasin Bakis</a>, <a href="/search/cs?searchtype=author&amp;query=Campolongo%2C+E+G">Elizabeth G Campolongo</a>, <a href="/search/cs?searchtype=author&amp;query=Thompson%2C+M+J">Matthew J Thompson</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xiaojun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lapp%2C+H">Hilmar Lapp</a>, <a href="/search/cs?searchtype=author&amp;query=Chao%2C+W">Wei-Lun Chao</a>, <a href="/search/cs?searchtype=author&amp;query=Mabee%2C+P+M">Paula M. Mabee</a>, <a href="/search/cs?searchtype=author&amp;query=Bart%2C+H+L">Henry L. Bart Jr.</a>, <a href="/search/cs?searchtype=author&amp;query=Dahdul%2C+W">Wasila Dahdul</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.08027v1-abstract-short" style="display: inline;"> Fishes are integral to both ecological systems and economic sectors, and studying fish traits is crucial for understanding biodiversity patterns and macro-evolution trends. 
To enable the analysis of visual traits from fish images, we introduce the Fish-Visual Trait Analysis (Fish-Vista) dataset - a large, annotated collection of about 60K fish images spanning 1900 different species, supporting sev&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.08027v1-abstract-full').style.display = 'inline'; document.getElementById('2407.08027v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.08027v1-abstract-full" style="display: none;"> Fishes are integral to both ecological systems and economic sectors, and studying fish traits is crucial for understanding biodiversity patterns and macro-evolution trends. To enable the analysis of visual traits from fish images, we introduce the Fish-Visual Trait Analysis (Fish-Vista) dataset - a large, annotated collection of about 60K fish images spanning 1900 different species, supporting several challenging and biologically relevant tasks including species classification, trait identification, and trait segmentation. These images have been curated through a sophisticated data processing pipeline applied to a cumulative set of images obtained from various museum collections. Fish-Vista provides fine-grained labels of various visual traits present in each image. It also offers pixel-level annotations of 9 different traits for 2427 fish images, facilitating additional trait segmentation and localization tasks. The ultimate goal of Fish-Vista is to provide a clean, carefully curated, high-resolution dataset that can serve as a foundation for accelerating biological discoveries using advances in AI. Finally, we provide a comprehensive analysis of state-of-the-art deep learning techniques on Fish-Vista. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.08027v1-abstract-full').style.display = 'none'; document.getElementById('2407.08027v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.16740">arXiv:2406.16740</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.16740">pdf</a>, <a href="https://arxiv.org/format/2406.16740">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Numerical Analysis">math.NA</span> </div> </div> <p class="title is-5 mathjax"> Learning the boundary-to-domain mapping using Lifting Product Fourier Neural Operators for partial differential equations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kashi%2C+A">Aditya Kashi</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Meena%2C+M+G">Muralikrishnan Gopalakrishnan Meena</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+H">Hao Lu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.16740v2-abstract-short" style="display: inline;"> Neural operators such as the Fourier Neural Operator (FNO) have been shown to provide resolution-independent deep learning models that can learn mappings between function spaces. 
For example, an initial condition can be mapped to the solution of a partial differential equation (PDE) at a future time-step using a neural operator. Despite the popularity of neural operators, their use to predict solu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.16740v2-abstract-full').style.display = 'inline'; document.getElementById('2406.16740v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.16740v2-abstract-full" style="display: none;"> Neural operators such as the Fourier Neural Operator (FNO) have been shown to provide resolution-independent deep learning models that can learn mappings between function spaces. For example, an initial condition can be mapped to the solution of a partial differential equation (PDE) at a future time-step using a neural operator. Despite the popularity of neural operators, their use to predict solution functions over a domain given only data over the boundary (such as a spatially varying Dirichlet boundary condition) remains unexplored. In this paper, we refer to such problems as boundary-to-domain problems; they have a wide range of applications in areas such as fluid mechanics, solid mechanics, heat transfer etc. We present a novel FNO-based architecture, named Lifting Product FNO (or LP-FNO) which can map arbitrary boundary functions defined on the lower-dimensional boundary to a solution in the entire domain. Specifically, two FNOs defined on the lower-dimensional boundary are lifted into the higher dimensional domain using our proposed lifting product layer. We demonstrate the efficacy and resolution independence of the proposed LP-FNO for the 2D Poisson equation. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.16740v2-abstract-full').style.display = 'none'; document.getElementById('2406.16740v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ICML 2024 AI for Science Workshop</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 65N99; 68T07 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.1; J.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.15533">arXiv:2402.15533</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.15533">pdf</a>, <a href="https://arxiv.org/format/2402.15533">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Performance">cs.PF</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> Asymmetries of Service: Interdependence and Synchronicity </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Andrew Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Yom-Tov%2C+G+B">Galit B. 
Yom-Tov</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.15533v1-abstract-short" style="display: inline;"> On many dimensions, services can be seen to exist along spectra measuring the degree of interaction between customer and agent. For instance, every interaction features some number of contributions by each of the two sides, creating a spectrum of interdependence. Additionally, each interaction is further characterized by the pacing of these contributions, implying a spectrum of synchronicity. Wher&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.15533v1-abstract-full').style.display = 'inline'; document.getElementById('2402.15533v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.15533v1-abstract-full" style="display: none;"> On many dimensions, services can be seen to exist along spectra measuring the degree of interaction between customer and agent. For instance, every interaction features some number of contributions by each of the two sides, creating a spectrum of interdependence. Additionally, each interaction is further characterized by the pacing of these contributions, implying a spectrum of synchronicity. Where a service falls on such spectra can simply be a consequence of its design, but it can also be a function of its state. As broadly evidenced empirically, an agent with several concurrent interactions will be slowed in each individual interaction, altering the service&#39;s synchronicity. Here, we study a Hawkes cluster model of the customer-agent interaction, which we show captures both of these service (a)symmetries. 
We find insightful connections to behavioral operations, such as proving the occurrence of non-monotonic performance (e.g., inverted-U throughput) from concurrency-driven asynchrony. Hence, we can prescribe the agent&#39;s optimal concurrency level. Furthermore, we show how the service design dictates the efficacy of these operational improvements, proving that the concurrency-optimized throughput is itself non-monotonic as a function of the interdependence. In what may be of independent interest methodologically, we establish an interpretable decomposition for Hawkes clusters via probabilistic combinatorics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.15533v1-abstract-full').style.display = 'none'; document.getElementById('2402.15533v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 90B22; 60G55 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.09441">arXiv:2310.09441</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.09441">pdf</a>, <a href="https://arxiv.org/format/2310.09441">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biological Physics">physics.bio-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> MEMTRACK: A Deep Learning-Based Approach to Microrobot Tracking in Dense and Low-Contrast Environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sawhney%2C+M">Medha Sawhney</a>, <a href="/search/cs?searchtype=author&amp;query=Karmarkar%2C+B">Bhas Karmarkar</a>, <a href="/search/cs?searchtype=author&amp;query=Leaman%2C+E+J">Eric J. Leaman</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a>, <a href="/search/cs?searchtype=author&amp;query=Behkam%2C+B">Bahareh Behkam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.09441v1-abstract-short" style="display: inline;"> Tracking microrobots is challenging, considering their minute size and high speed. 
As the field progresses towards developing microrobots for biomedical applications and conducting mechanistic studies in physiologically relevant media (e.g., collagen), this challenge is exacerbated by the dense surrounding environments with feature size and shape comparable to microrobots. Herein, we report Motion&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.09441v1-abstract-full').style.display = 'inline'; document.getElementById('2310.09441v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.09441v1-abstract-full" style="display: none;"> Tracking microrobots is challenging, considering their minute size and high speed. As the field progresses towards developing microrobots for biomedical applications and conducting mechanistic studies in physiologically relevant media (e.g., collagen), this challenge is exacerbated by the dense surrounding environments with feature size and shape comparable to microrobots. Herein, we report Motion Enhanced Multi-level Tracker (MEMTrack), a robust pipeline for detecting and tracking microrobots using synthetic motion features, deep learning-based object detection, and a modified Simple Online and Real-time Tracking (SORT) algorithm with interpolation for tracking. Our object detection approach combines different models based on the object&#39;s motion pattern. We trained and validated our model using bacterial micro-motors in collagen (tissue phantom) and tested it in collagen and aqueous media. We demonstrate that MEMTrack accurately tracks even the most challenging bacteria missed by skilled human annotators, achieving precision and recall of 77% and 48% in collagen and 94% and 35% in liquid media, respectively. Moreover, we show that MEMTrack can quantify average bacteria speed with no statistically significant difference from the laboriously-produced manual tracking data. 
MEMTrack represents a significant contribution to microrobot localization and tracking, and opens the potential for vision-based deep learning approaches to microrobot control in dense and low-contrast settings. All source code for training and testing MEMTrack and reproducing the results of the paper have been made publicly available https://github.com/sawhney-medha/MEMTrack. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.09441v1-abstract-full').style.display = 'none'; document.getElementById('2310.09441v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.11052">arXiv:2308.11052</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.11052">pdf</a>, <a href="https://arxiv.org/format/2308.11052">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Beyond Discriminative Regions: Saliency Maps as Alternatives to CAMs for Weakly Supervised Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maruf%2C+M">M. 
Maruf</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Dutta%2C+A">Amartya Dutta</a>, <a href="/search/cs?searchtype=author&amp;query=Bu%2C+J">Jie Bu</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.11052v1-abstract-short" style="display: inline;"> In recent years, several Weakly Supervised Semantic Segmentation (WS3) methods have been proposed that use class activation maps (CAMs) generated by a classifier to produce pseudo-ground truths for training segmentation models. While CAMs are good at highlighting discriminative regions (DR) of an image, they are known to disregard regions of the object that do not contribute to the classifier&#39;s pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.11052v1-abstract-full').style.display = 'inline'; document.getElementById('2308.11052v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.11052v1-abstract-full" style="display: none;"> In recent years, several Weakly Supervised Semantic Segmentation (WS3) methods have been proposed that use class activation maps (CAMs) generated by a classifier to produce pseudo-ground truths for training segmentation models. While CAMs are good at highlighting discriminative regions (DR) of an image, they are known to disregard regions of the object that do not contribute to the classifier&#39;s prediction, termed non-discriminative regions (NDR). In contrast, attribution methods such as saliency maps provide an alternative approach for assigning a score to every pixel based on its contribution to the classification prediction. 
This paper provides a comprehensive comparison between saliencies and CAMs for WS3. Our study includes multiple perspectives on understanding their similarities and dissimilarities. Moreover, we provide new evaluation metrics that perform a comprehensive assessment of WS3 performance of alternative methods w.r.t. CAMs. We demonstrate the effectiveness of saliencies in addressing the limitation of CAMs through our empirical studies on benchmark datasets. Furthermore, we propose random cropping as a stochastic aggregation technique that improves the performance of saliency, making it a strong alternative to CAM for WS3. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.11052v1-abstract-full').style.display = 'none'; document.getElementById('2308.11052v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 13 figures, 4 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.00864">arXiv:2211.00864</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.00864">pdf</a>, <a href="https://arxiv.org/format/2211.00864">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Multi-task Learning for Source Attribution and Field Reconstruction for Methane Monitoring </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Yeo%2C+K">Kyongmin Yeo</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a>, <a href="/search/cs?searchtype=author&amp;query=Klein%2C+L">Levente Klein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.00864v1-abstract-short" style="display: inline;"> Inferring the source information of greenhouse gases, such as methane, from spatially sparse sensor observations is an essential element in mitigating climate change. 
While it is well understood that the complex behavior of the atmospheric dispersion of such pollutants is governed by the Advection-Diffusion equation, it is difficult to directly apply the governing equations to identify the source&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.00864v1-abstract-full').style.display = 'inline'; document.getElementById('2211.00864v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.00864v1-abstract-full" style="display: none;"> Inferring the source information of greenhouse gases, such as methane, from spatially sparse sensor observations is an essential element in mitigating climate change. While it is well understood that the complex behavior of the atmospheric dispersion of such pollutants is governed by the Advection-Diffusion equation, it is difficult to directly apply the governing equations to identify the source location and magnitude (inverse problem) because of the spatially sparse and noisy observations, i.e., the pollution concentration is known only at the sensor locations and sensors sensitivity is limited. Here, we develop a multi-task learning framework that can provide high-fidelity reconstruction of the concentration field and identify emission characteristics of the pollution sources such as their location, emission strength, etc. from sparse sensor observations. We demonstrate that our proposed framework is able to achieve accurate reconstruction of the methane concentrations from sparse sensor measurements as well as precisely pin-point the location and emission strength of these pollution sources. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.00864v1-abstract-full').style.display = 'none'; document.getElementById('2211.00864v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 8 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.02338">arXiv:2207.02338</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.02338">pdf</a>, <a href="https://arxiv.org/format/2207.02338">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Mitigating Propagation Failures in Physics-informed Neural Networks using Retain-Resample-Release (R3) Sampling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Bu%2C+J">Jie Bu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Sifan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Perdikaris%2C+P">Paris Perdikaris</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2207.02338v3-abstract-short" style="display: inline;"> Despite the success of physics-informed neural networks (PINNs) in approximating partial differential equations (PDEs), PINNs can sometimes fail to converge to the correct solution in problems involving complicated PDEs. This is reflected in several recent studies on characterizing the &#34;failure modes&#34; of PINNs, although a thorough understanding of the connection between PINN failure modes and samp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.02338v3-abstract-full').style.display = 'inline'; document.getElementById('2207.02338v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.02338v3-abstract-full" style="display: none;"> Despite the success of physics-informed neural networks (PINNs) in approximating partial differential equations (PDEs), PINNs can sometimes fail to converge to the correct solution in problems involving complicated PDEs. This is reflected in several recent studies on characterizing the &#34;failure modes&#34; of PINNs, although a thorough understanding of the connection between PINN failure modes and sampling strategies is missing. In this paper, we provide a novel perspective of failure modes of PINNs by hypothesizing that training PINNs relies on successful &#34;propagation&#34; of solution from initial and/or boundary condition points to interior points. We show that PINNs with poor sampling strategies can get stuck at trivial solutions if there are propagation failures, characterized by highly imbalanced PDE residual fields. To mitigate propagation failures, we propose a novel Retain-Resample-Release sampling (R3) algorithm that can incrementally accumulate collocation points in regions of high PDE residuals with little to no computational overhead. 
We provide an extension of R3 sampling to respect the principle of causality while solving time-dependent PDEs. We theoretically analyze the behavior of R3 sampling and empirically demonstrate its efficacy and efficiency in comparison with baselines on a variety of PDE problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.02338v3-abstract-full').style.display = 'none'; document.getElementById('2207.02338v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">39 pages, 53 figures, 6 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.00684">arXiv:2110.00684</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.00684">pdf</a>, <a href="https://arxiv.org/format/2110.00684">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Learning Compact Representations of Neural Networks using DiscriminAtive Masking (DAM) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bu%2C+J">Jie Bu</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Maruf%2C+M">M. 
Maruf</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.00684v1-abstract-short" style="display: inline;"> A central goal in deep learning is to learn compact representations of features at every layer of a neural network, which is useful for both unsupervised representation learning and structured network pruning. While there is a growing body of work in structured pruning, current state-of-the-art methods suffer from two key limitations: (i) instability during training, and (ii) need for an additiona&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.00684v1-abstract-full').style.display = 'inline'; document.getElementById('2110.00684v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.00684v1-abstract-full" style="display: none;"> A central goal in deep learning is to learn compact representations of features at every layer of a neural network, which is useful for both unsupervised representation learning and structured network pruning. While there is a growing body of work in structured pruning, current state-of-the-art methods suffer from two key limitations: (i) instability during training, and (ii) need for an additional step of fine-tuning, which is resource-intensive. At the core of these limitations is the lack of a systematic approach that jointly prunes and refines weights during training in a single stage, and does not require any fine-tuning upon convergence to achieve state-of-the-art performance. We present a novel single-stage structured pruning method termed DiscriminAtive Masking (DAM). 
The key intuition behind DAM is to discriminatively prefer some of the neurons to be refined during the training process, while gradually masking out other neurons. We show that our proposed DAM approach has remarkably good performance over various applications, including dimensionality reduction, recommendation system, graph representation learning, and structured pruning for image classification. We also theoretically show that the learning objective of DAM is directly related to minimizing the L0 norm of the masking layer. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.00684v1-abstract-full').style.display = 'none'; document.getElementById('2110.00684v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages, 11 figures, 7 tables, Accepted to NeurIPS 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.02993">arXiv:2106.02993</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.02993">pdf</a>, <a href="https://arxiv.org/format/2106.02993">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" 
href="https://doi.org/10.1145/3447548.3467449">10.1145/3447548.3467449 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> PID-GAN: A GAN Framework based on a Physics-informed Discriminator for Uncertainty Quantification with Physics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Maruf%2C+M">M. Maruf</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.02993v1-abstract-short" style="display: inline;"> As applications of deep learning (DL) continue to seep into critical scientific use-cases, the importance of performing uncertainty quantification (UQ) with DL has become more pressing than ever before. In scientific applications, it is also important to inform the learning of DL models with knowledge of physics of the problem to produce physically consistent and generalized solutions. This is ref&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.02993v1-abstract-full').style.display = 'inline'; document.getElementById('2106.02993v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.02993v1-abstract-full" style="display: none;"> As applications of deep learning (DL) continue to seep into critical scientific use-cases, the importance of performing uncertainty quantification (UQ) with DL has become more pressing than ever before. In scientific applications, it is also important to inform the learning of DL models with knowledge of physics of the problem to produce physically consistent and generalized solutions. 
This is referred to as the emerging field of physics-informed deep learning (PIDL). We consider the problem of developing PIDL formulations that can also perform UQ. To this end, we propose a novel physics-informed GAN architecture, termed PID-GAN, where the knowledge of physics is used to inform the learning of both the generator and discriminator models, making ample use of unlabeled data instances. We show that our proposed PID-GAN framework does not suffer from imbalance of generator gradients from multiple loss terms as compared to state-of-the-art. We also empirically demonstrate the efficacy of our proposed framework on a variety of case studies involving benchmark physics-based PDEs as well as imperfect physics. All the code and datasets used in this study have been made available on this link : https://github.com/arkadaw9/PID-GAN. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.02993v1-abstract-full').style.display = 'none'; document.getElementById('2106.02993v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 11 figures, 2 tables, Published at KDD 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.04447">arXiv:2009.04447</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.04447">pdf</a>, <a href="https://arxiv.org/format/2009.04447">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Beyond Observed Connections : Link Injection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bu%2C+J">Jie Bu</a>, <a href="/search/cs?searchtype=author&amp;query=Maruf%2C+M">M. Maruf</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.04447v1-abstract-short" style="display: inline;"> In this paper, we proposed the \textit{link injection}, a novel method that helps any differentiable graph machine learning models to go beyond observed connections from the input data in an end-to-end learning fashion. It finds out (weak) connections in favor of the current task that is not present in the input data via a parametric link injection layer. 
We evaluate our method on both node classi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.04447v1-abstract-full').style.display = 'inline'; document.getElementById('2009.04447v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.04447v1-abstract-full" style="display: none;"> In this paper, we proposed the \textit{link injection}, a novel method that helps any differentiable graph machine learning models to go beyond observed connections from the input data in an end-to-end learning fashion. It finds out (weak) connections in favor of the current task that is not present in the input data via a parametric link injection layer. We evaluate our method on both node classification and link prediction tasks using a series of state-of-the-art graph convolution networks. Results show that the link injection helps a variety of models to achieve better performances on both applications. Further empirical analysis shows a great potential of this method in efficiently exploiting unseen connections from the injected links. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.04447v1-abstract-full').style.display = 'none'; document.getElementById('2009.04447v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.07861">arXiv:2004.07861</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2004.07861">pdf</a>, <a href="https://arxiv.org/format/2004.07861">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> The Co-Production of Service: Modeling Services in Contact Centers Using Hawkes Processes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Andrew Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Castellanos%2C+A">Antonio Castellanos</a>, <a href="/search/cs?searchtype=author&amp;query=Yom-Tov%2C+G+B">Galit B. Yom-Tov</a>, <a href="/search/cs?searchtype=author&amp;query=Pender%2C+J">Jamol Pender</a>, <a href="/search/cs?searchtype=author&amp;query=Gruendlinger%2C+L">Leor Gruendlinger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.07861v5-abstract-short" style="display: inline;"> In customer support contact centers, every service interaction involves a messaging dialogue between a customer and an agent; together, they exchange information, solve problems, and collectively co-produce the service. Because the service progression is shaped by the history of conversation so far, we propose a bivariate, marked Hawkes process cluster model of the customer-agent interaction. 
To e&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.07861v5-abstract-full').style.display = 'inline'; document.getElementById('2004.07861v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.07861v5-abstract-full" style="display: none;"> In customer support contact centers, every service interaction involves a messaging dialogue between a customer and an agent; together, they exchange information, solve problems, and collectively co-produce the service. Because the service progression is shaped by the history of conversation so far, we propose a bivariate, marked Hawkes process cluster model of the customer-agent interaction. To evaluate our stochastic model of service, we apply it to an industry contact center dataset containing nearly 5 million messages. Through both a novel residual analysis comparison and several Monte Carlo goodness-of-fit tests, we show that the Hawkes cluster model indeed captures dynamics at the heart of the service and also surpasses classic models that do not incorporate the service history. Furthermore, in an entirely data-driven simulation, we demonstrate how this history-dependent model can be leveraged operationally to inform a prediction-based routing policy. We show that widely-used and well-studied customer routing policies can be outperformed with simple modifications according to the Hawkes model. Through analysis of a stylized model proposed in the contact center literature, we prove that service heterogeneity can cause this underperformance and, moreover, that such heterogeneity will occur if service closures are not carefully managed. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.07861v5-abstract-full').style.display = 'none'; document.getElementById('2004.07861v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.02682">arXiv:1911.02682</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1911.02682">pdf</a>, <a href="https://arxiv.org/format/1911.02682">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Physics-Guided Architecture (PGA) of Neural Networks for Quantifying Uncertainty in Lake Temperature Modeling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Thomas%2C+R+Q">R. Quinn Thomas</a>, <a href="/search/cs?searchtype=author&amp;query=Carey%2C+C+C">Cayelan C. Carey</a>, <a href="/search/cs?searchtype=author&amp;query=Read%2C+J+S">Jordan S. Read</a>, <a href="/search/cs?searchtype=author&amp;query=Appling%2C+A+P">Alison P. 
Appling</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.02682v1-abstract-short" style="display: inline;"> To simultaneously address the rising need of expressing uncertainties in deep learning models along with producing model outputs which are consistent with the known scientific knowledge, we propose a novel physics-guided architecture (PGA) of neural networks in the context of lake temperature modeling where the physical constraints are hard coded in the neural network architecture. This allows us&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.02682v1-abstract-full').style.display = 'inline'; document.getElementById('1911.02682v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.02682v1-abstract-full" style="display: none;"> To simultaneously address the rising need of expressing uncertainties in deep learning models along with producing model outputs which are consistent with the known scientific knowledge, we propose a novel physics-guided architecture (PGA) of neural networks in the context of lake temperature modeling where the physical constraints are hard coded in the neural network architecture. This allows us to integrate such models with state of the art uncertainty estimation approaches such as Monte Carlo (MC) Dropout without sacrificing the physical consistency of our results. We demonstrate the effectiveness of our approach in ensuring better generalizability as well as physical consistency in MC estimates over data collected from Lake Mendota in Wisconsin and Falling Creek Reservoir in Virginia, even with limited training data. 
We further show that our MC estimates correctly match the distribution of ground-truth observations, thus making the PGA paradigm amenable to physically grounded uncertainty quantification. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.02682v1-abstract-full').style.display = 'none'; document.getElementById('1911.02682v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 15 figures, 2 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1910.01445">arXiv:1910.01445</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1910.01445">pdf</a>, <a href="https://arxiv.org/format/1910.01445">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applications">stat.AP</span> </div> </div> <p class="title is-5 mathjax"> Analyzing the Spotify Top 200 Through a Point Process Lens </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Harris%2C+M">Michelangelo Harris</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+B">Brian Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Park%2C+C">Cean Park</a>, <a href="/search/cs?searchtype=author&amp;query=Ramireddy%2C+R">Ravi Ramireddy</a>, <a href="/search/cs?searchtype=author&amp;query=Ren%2C+G">Gloria Ren</a>, <a 
href="/search/cs?searchtype=author&amp;query=Ren%2C+M">Max Ren</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+S">Shangdi Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Andrew Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Pender%2C+J">Jamol Pender</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1910.01445v1-abstract-short" style="display: inline;"> Every generation throws a hero up the pop charts. For the current generation, one of the most relevant pop charts is the Spotify Top 200. Spotify is the world&#39;s largest music streaming service and the Top 200 is a daily list of the platform&#39;s 200 most streamed songs. In this paper, we analyze a data set collected from over 20 months of these rankings. Via exploratory data analysis, we investigate&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.01445v1-abstract-full').style.display = 'inline'; document.getElementById('1910.01445v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1910.01445v1-abstract-full" style="display: none;"> Every generation throws a hero up the pop charts. For the current generation, one of the most relevant pop charts is the Spotify Top 200. Spotify is the world&#39;s largest music streaming service and the Top 200 is a daily list of the platform&#39;s 200 most streamed songs. In this paper, we analyze a data set collected from over 20 months of these rankings. Via exploratory data analysis, we investigate the popularity, rarity, and longevity of songs on the Top 200 and we construct a stochastic process model for the daily streaming counts that draws upon ideas from stochastic intensity point processes and marked point processes. 
Using the parameters of this model as estimated from the Top 200 data, we apply a clustering algorithm to identify songs with similar features and performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.01445v1-abstract-full').style.display = 'none'; document.getElementById('1910.01445v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 60G55; 62M05; 62H30 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.12650">arXiv:1907.12650</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1907.12650">pdf</a>, <a href="https://arxiv.org/format/1907.12650">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Performance">cs.PF</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> How to Staff When Customers Arrive in Batches </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Andrew Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Hampshire%2C+R+C">Robert C. 
Hampshire</a>, <a href="/search/cs?searchtype=author&amp;query=Pender%2C+J">Jamol Pender</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.12650v4-abstract-short" style="display: inline;"> In many different settings, requests for service can arrive in near or true simultaneity with one another. This creates batches of arrivals to the underlying queueing system. In this paper, we study the staffing problem for the batch arrival queue. We show that batches place a dangerous and deceptive stress on services, requiring a high amount of resources and exhibiting a fundamentally larger tai&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.12650v4-abstract-full').style.display = 'inline'; document.getElementById('1907.12650v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.12650v4-abstract-full" style="display: none;"> In many different settings, requests for service can arrive in near or true simultaneity with one another. This creates batches of arrivals to the underlying queueing system. In this paper, we study the staffing problem for the batch arrival queue. We show that batches place a dangerous and deceptive stress on services, requiring a high amount of resources and exhibiting a fundamentally larger tail in those demands. This uncovers a service regime in which a system with large batch arrivals may have low utilization but will still have non-trivial waiting. Methodologically, these staffing results follow from novel large batch and large batch-and-rate limits of the multi-server queueing model. In the large batch limit, we establish the first formal connection between general multi-server queues and storage processes, another family of stochastic models. 
By consequence, we show that the batch scaled queue length process is not asymptotically normal, and that, in fact, the fluid and diffusion-type limits coincide. Hence, the (safety) staffing of this system must be directly proportional to the batch size just to achieve a non-degenerate probability of wait. In exhibition of the existence and insights of this large batch regime, we apply our results to data on Covid-19 contact tracing in New York City. In doing so, we identify significant benefits produced by the tracing agency&#39;s decision to staff above national recommendations, and we also demonstrate that there may have been an opportunity to further improve the operation by optimizing the arrival pattern in the public health data pipeline. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.12650v4-abstract-full').style.display = 'none'; document.getElementById('1907.12650v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. 
<!-- Search results list (continued): metadata tail of the previous result, then one complete result entry for arXiv:1710.11431 (title, category tags, authors, expandable abstract, submission history). -->
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 60K25; 90B22; 90B06 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1710.11431">arXiv:1710.11431</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1710.11431">pdf</a>, <a href="https://arxiv.org/format/1710.11431">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Analysis, Statistics and Probability">physics.data-an</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Physics-guided Neural Networks (PGNN): An Application in Lake Temperature Modeling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Daw%2C+A">Arka Daw</a>, <a href="/search/cs?searchtype=author&amp;query=Karpatne%2C+A">Anuj Karpatne</a>, <a href="/search/cs?searchtype=author&amp;query=Watkins%2C+W">William Watkins</a>, <a href="/search/cs?searchtype=author&amp;query=Read%2C+J">Jordan Read</a>, <a href="/search/cs?searchtype=author&amp;query=Kumar%2C+V">Vipin Kumar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1710.11431v3-abstract-short" style="display: inline;"> This paper introduces a framework for combining scientific knowledge of physics-based models with neural networks to advance scientific 
discovery. This framework, termed physics-guided neural networks (PGNN), leverages the output of physics-based model simulations along with observational features in a hybrid modeling setup to generate predictions using a neural network architecture. Further, this&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1710.11431v3-abstract-full').style.display = 'inline'; document.getElementById('1710.11431v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1710.11431v3-abstract-full" style="display: none;"> This paper introduces a framework for combining scientific knowledge of physics-based models with neural networks to advance scientific discovery. This framework, termed physics-guided neural networks (PGNN), leverages the output of physics-based model simulations along with observational features in a hybrid modeling setup to generate predictions using a neural network architecture. Further, this framework uses physics-based loss functions in the learning objective of neural networks to ensure that the model predictions not only show lower errors on the training set but are also scientifically consistent with the known physics on the unlabeled set. We illustrate the effectiveness of PGNN for the problem of lake temperature modeling, where physical relationships between the temperature, density, and depth of water are used to design a physics-based loss function. By using scientific knowledge to guide the construction and learning of neural networks, we are able to show that the proposed framework ensures better generalizability as well as scientific consistency of results. All the code and datasets used in this study have been made available on this link \url{https://github.com/arkadaw9/PGNN}. 
<!-- NOTE(review): the abstract "More"/"Less" toggles in this entry are href-less <a> elements driven by inline onclick handlers that flip inline style.display. Prefer <button type="button"> wired via addEventListener (keyboard-accessible, focusable, and compatible with a strict CSP that forbids inline handlers). -->
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1710.11431v3-abstract-full').style.display = 'none'; document.getElementById('1710.11431v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 October, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2017. </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <!-- Page footer: secondary navigation (About/Help; Contact/Subscribe with inline SVG icons; Copyright/Privacy; accessibility and operational-status links). Each inline SVG icon carries role="presentation" plus a <title>/<desc>. --> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" 
class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 
112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

<!-- Stray text captured after the closing html tag (pagination labels from the page UI); preserved but commented out so the document ends cleanly: Pages: 1 2 3 4 5 6 7 8 9 10 -->