
Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 82 results for author: <span class="mathjax">Adams, J</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Adams%2C+J">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Adams, J"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Adams%2C+J&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Adams, J"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Adams%2C+J&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Adams%2C+J&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Adams%2C+J&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.17585">arXiv:2411.17585</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.17585">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Multi-Objective Reinforcement Learning for Automated Resilient Cyber Defence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=O%27Driscoll%2C+R">Ross O&#39;Driscoll</a>, <a href="/search/cs?searchtype=author&amp;query=Hagen%2C+C">Claudia Hagen</a>, <a href="/search/cs?searchtype=author&amp;query=Bater%2C+J">Joe Bater</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+M">James M. Adams</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.17585v1-abstract-short" style="display: inline;"> Cyber-attacks pose a security threat to military command and control networks, Intelligence, Surveillance, and Reconnaissance (ISR) systems, and civilian critical national infrastructure. The use of artificial intelligence and autonomous agents in these attacks increases the scale, range, and complexity of this threat and the subsequent disruption they cause. Autonomous Cyber Defence (ACD) agents&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17585v1-abstract-full').style.display = 'inline'; document.getElementById('2411.17585v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.17585v1-abstract-full" style="display: none;"> Cyber-attacks pose a security threat to military command and control networks, Intelligence, Surveillance, and Reconnaissance (ISR) systems, and civilian critical national infrastructure. The use of artificial intelligence and autonomous agents in these attacks increases the scale, range, and complexity of this threat and the subsequent disruption they cause. 
Autonomous Cyber Defence (ACD) agents aim to mitigate this threat by responding at machine speed and at the scale required to address the problem. Sequential decision-making algorithms such as Deep Reinforcement Learning (RL) provide a promising route to create ACD agents. These algorithms focus on a single objective such as minimizing the intrusion of red agents on the network, by using a handcrafted weighted sum of rewards. This approach removes the ability to adapt the model during inference, and fails to address the many competing objectives present when operating and protecting these networks. Conflicting objectives, such as restoring a machine from a back-up image, must be carefully balanced with the cost of associated down-time, or the disruption to network traffic or services that might result. Instead of pursing a Single-Objective RL (SORL) approach, here we present a simple example of a multi-objective network defence game that requires consideration of both defending the network against red-agents and maintaining critical functionality of green-agents. Two Multi-Objective Reinforcement Learning (MORL) algorithms, namely Multi-Objective Proximal Policy Optimization (MOPPO), and Pareto-Conditioned Networks (PCN), are used to create two trained ACD agents whose performance is compared on our Multi-Objective Cyber Defence game. The benefits and limitations of MORL ACD agents in comparison to SORL ACD agents are discussed based on the investigations of this game. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17585v1-abstract-full').style.display = 'none'; document.getElementById('2411.17585v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.09707">arXiv:2405.09707</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.09707">pdf</a>, <a href="https://arxiv.org/format/2405.09707">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Point2SSM++: Self-Supervised Learning of Anatomical Shape Models from Point Clouds </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jadie Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Elhabian%2C+S">Shireen Elhabian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.09707v1-abstract-short" style="display: inline;"> Correspondence-based statistical shape modeling (SSM) stands as a powerful technology for morphometric analysis in clinical research. 
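
The SORL-versus-MORL distinction above comes down to where the reward trade-off is fixed. A minimal Python sketch of that difference, with hypothetical reward components (the names and weights are illustrative, not the paper's environment):

```python
import numpy as np

# Hypothetical two-objective reward for one defensive action:
# r[0] = security (limits red-agent intrusion), r[1] = availability
# (keeps green-agent services running). Names are illustrative only.
def step_reward(intrusion_blocked: float, downtime: float) -> np.ndarray:
    return np.array([intrusion_blocked, -downtime])

# SORL: the weight vector is baked in before training, so the trade-off
# cannot be changed at inference time.
SORL_WEIGHTS = np.array([0.8, 0.2])
def sorl_scalar_reward(r: np.ndarray) -> float:
    return float(SORL_WEIGHTS @ r)

# MORL (e.g., MOPPO or PCN): the agent is trained on the reward *vector*,
# and a preference can be supplied when the policy is used.
def morl_scalarize(r: np.ndarray, preference: np.ndarray) -> float:
    return float(preference @ r)

r = step_reward(intrusion_blocked=1.0, downtime=0.5)
print(sorl_scalar_reward(r))                     # fixed trade-off
print(morl_scalarize(r, np.array([0.3, 0.7])))   # chosen at inference
```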

2. arXiv:2405.09707 [pdf, other] cs.CV cs.LG
Point2SSM++: Self-Supervised Learning of Anatomical Shape Models from Point Clouds
Authors: Jadie Adams, Shireen Elhabian
Abstract: Correspondence-based statistical shape modeling (SSM) stands as a powerful technology for morphometric analysis in clinical research. SSM facilitates population-level characterization and quantification of anatomical shapes such as bones and organs, aiding in pathology and disease diagnostics and treatment planning. Despite its potential, SSM remains under-utilized in medical research due to the significant overhead associated with automatic construction methods, which demand complete, aligned shape surface representations. Additionally, optimization-based techniques rely on bias-inducing assumptions or templates and have prolonged inference times, as the entire cohort is optimized simultaneously. To overcome these challenges, we introduce Point2SSM++, a principled, self-supervised deep learning approach that directly learns correspondence points from point cloud representations of anatomical shapes. Point2SSM++ is robust to misaligned and inconsistent input, providing SSM that accurately samples individual shape surfaces while effectively capturing population-level statistics. Additionally, we present principled extensions of Point2SSM++ tailored for dynamic spatiotemporal and multi-anatomy use cases, demonstrating the broad versatility of the framework. Through extensive validation across diverse anatomies, evaluation metrics, and clinically relevant downstream tasks, we demonstrate Point2SSM++'s superiority over existing state-of-the-art deep learning models and traditional approaches. Point2SSM++ substantially enhances the feasibility of SSM generation and significantly broadens its array of potential clinical applications.
Submitted 15 May, 2024; originally announced May 2024.
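
For intuition, point-cloud correspondence models of this kind are typically trained with surface-sampling losses such as the Chamfer distance. A generic numpy sketch (Point2SSM++'s full objective is richer; this is only the standard ingredient):

```python
import numpy as np

# Chamfer distance between two point clouds: a standard self-supervised
# surface-sampling loss for point-cloud networks. This is generic; the
# actual Point2SSM++ objective also enforces correspondence consistency.
def chamfer(a: np.ndarray, b: np.ndarray) -> float:
    d = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)  # pairwise
    return float(d.min(axis=1).mean() + d.min(axis=0).mean())

pred = np.random.rand(128, 3)     # predicted correspondence points
surface = np.random.rand(512, 3)  # points sampled from the shape surface
print(chamfer(pred, surface))
```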
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.09697">arXiv:2405.09697</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.09697">pdf</a>, <a href="https://arxiv.org/format/2405.09697">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Weakly Supervised Bayesian Shape Modeling from Unsegmented Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jadie Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Iyer%2C+K">Krithika Iyer</a>, <a href="/search/cs?searchtype=author&amp;query=Elhabian%2C+S">Shireen Elhabian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.09697v1-abstract-short" style="display: inline;"> Anatomical shape analysis plays a pivotal role in clinical research and hypothesis testing, where the relationship between form and function is paramount. Correspondence-based statistical shape modeling (SSM) facilitates population-level morphometrics but requires a cumbersome, potentially bias-inducing construction pipeline. Recent advancements in deep learning have streamlined this process in in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.09697v1-abstract-full').style.display = 'inline'; document.getElementById('2405.09697v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.09697v1-abstract-full" style="display: none;"> Anatomical shape analysis plays a pivotal role in clinical research and hypothesis testing, where the relationship between form and function is paramount. Correspondence-based statistical shape modeling (SSM) facilitates population-level morphometrics but requires a cumbersome, potentially bias-inducing construction pipeline. Recent advancements in deep learning have streamlined this process in inference by providing SSM prediction directly from unsegmented medical images. However, the proposed approaches are fully supervised and require utilizing a traditional SSM construction pipeline to create training data, thus inheriting the associated burdens and limitations. To address these challenges, we introduce a weakly supervised deep learning approach to predict SSM from images using point cloud supervision. Specifically, we propose reducing the supervision associated with the state-of-the-art fully Bayesian variational information bottleneck DeepSSM (BVIB-DeepSSM) model. BVIB-DeepSSM is an effective, principled framework for predicting probabilistic anatomical shapes from images with quantification of both aleatoric and epistemic uncertainties. Whereas the original BVIB-DeepSSM method requires strong supervision in the form of ground truth correspondence points, the proposed approach utilizes weak supervision via point cloud surface representations, which are more readily obtainable. Furthermore, the proposed approach learns correspondence in a completely data-driven manner without prior assumptions about the expected variability in shape cohort. 
Our experiments demonstrate that this approach yields similar accuracy and uncertainty estimation to the fully supervised scenario while substantially enhancing the feasibility of model training for SSM construction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.09697v1-abstract-full').style.display = 'none'; document.getElementById('2405.09697v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.17967">arXiv:2404.17967</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.17967">pdf</a>, <a href="https://arxiv.org/format/2404.17967">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SCorP: Statistics-Informed Dense Correspondence Prediction Directly from Unsegmented Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iyer%2C+K">Krithika Iyer</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jadie Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Elhabian%2C+S+Y">Shireen Y. Elhabian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.17967v2-abstract-short" style="display: inline;"> Statistical shape modeling (SSM) is a powerful computational framework for quantifying and analyzing the geometric variability of anatomical structures, facilitating advancements in medical research, diagnostics, and treatment planning. Traditional methods for shape modeling from imaging data demand significant manual and computational resources. Additionally, these methods necessitate repeating t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.17967v2-abstract-full').style.display = 'inline'; document.getElementById('2404.17967v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.17967v2-abstract-full" style="display: none;"> Statistical shape modeling (SSM) is a powerful computational framework for quantifying and analyzing the geometric variability of anatomical structures, facilitating advancements in medical research, diagnostics, and treatment planning. Traditional methods for shape modeling from imaging data demand significant manual and computational resources. Additionally, these methods necessitate repeating the entire modeling pipeline to derive shape descriptors (e.g., surface-based point correspondences) for new data. While deep learning approaches have shown promise in streamlining the construction of SSMs on new data, they still rely on traditional techniques to supervise the training of the deep networks. Moreover, the predominant linearity assumption of traditional approaches restricts their efficacy, a limitation also inherited by deep learning models trained using optimized/established correspondences. 
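
The aleatoric/epistemic split mentioned above has a standard form. A generic illustration using an ensemble of probabilistic predictors (BVIB-DeepSSM derives these terms from its variational posterior, not from a plain ensemble):

```python
import numpy as np

# Each of 8 ensemble members predicts a mean mu_i and variance sigma2_i
# for 128 outputs. All numbers below are random placeholders.
mus = np.random.rand(8, 128)
sigma2s = np.random.rand(8, 128) * 0.01

aleatoric = sigma2s.mean(axis=0)  # data noise: mean of variances
epistemic = mus.var(axis=0)       # model uncertainty: variance of means
total = aleatoric + epistemic
print(total[:5])
```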

4. arXiv:2404.17967 [pdf, other] cs.CV
SCorP: Statistics-Informed Dense Correspondence Prediction Directly from Unsegmented Medical Images
Authors: Krithika Iyer, Jadie Adams, Shireen Y. Elhabian
Abstract: Statistical shape modeling (SSM) is a powerful computational framework for quantifying and analyzing the geometric variability of anatomical structures, facilitating advancements in medical research, diagnostics, and treatment planning. Traditional methods for shape modeling from imaging data demand significant manual and computational resources. Additionally, these methods necessitate repeating the entire modeling pipeline to derive shape descriptors (e.g., surface-based point correspondences) for new data. While deep learning approaches have shown promise in streamlining the construction of SSMs on new data, they still rely on traditional techniques to supervise the training of the deep networks. Moreover, the predominant linearity assumption of traditional approaches restricts their efficacy, a limitation also inherited by deep learning models trained using optimized/established correspondences. Consequently, representing complex anatomies becomes challenging. To address these limitations, we introduce SCorP, a novel framework capable of predicting surface-based correspondences directly from unsegmented images. By leveraging a shape prior learned directly from surface meshes in an unsupervised manner, the proposed model eliminates the need for an optimized shape model as training supervision. The strong shape prior acts as a teacher and regularizes the feature learning of the student network, guiding it to learn image-based features that are predictive of surface correspondences. The proposed model streamlines the training and inference phases by removing the supervision for the correspondence prediction task while alleviating the linearity assumption.
Submitted 22 May, 2024; v1 submitted 27 April, 2024; originally announced April 2024.
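
A rough sketch of the teacher-student idea described above, with random placeholder tensors (illustrative only, not the authors' objective):

```python
import numpy as np

# Illustrative teacher-student objective in the spirit of SCorP: a shape
# prior (teacher) trained on surface meshes yields correspondence
# estimates and features, and the image-based student is regularized
# toward them, replacing supervision from an optimized shape model.
def student_loss(student_pts, teacher_pts, student_feat, teacher_feat, alpha=0.1):
    guide = np.mean((student_pts - teacher_pts) ** 2)    # prediction guidance
    align = np.mean((student_feat - teacher_feat) ** 2)  # feature alignment
    return guide + alpha * align

print(student_loss(np.random.rand(128, 3), np.random.rand(128, 3),
                   np.random.rand(64), np.random.rand(64)))
```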

5. arXiv:2404.12603 [pdf, other] quant-ph cs.PL
Qwerty: A Basis-Oriented Quantum Programming Language
Authors: Austin J. Adams, Sharjeel Khan, Jeffrey S. Young, Thomas M. Conte
Abstract: Quantum computers have evolved from the theoretical realm into a race to large-scale implementations. This is due to the promise of revolutionary speedups, where achieving such a speedup requires designing an algorithm that harnesses the structure of a problem using quantum mechanics. Yet many quantum programming languages today require programmers to reason at a low level of quantum gate circuitry. This presents a significant barrier to entry for programmers who have not yet built up an intuition about quantum gate semantics, and it can prove tedious even for those who have. In this paper, we present Qwerty, a new quantum programming language that allows programmers to manipulate qubits more expressively than with gates, relegating the tedious task of gate selection to the compiler. Due to its novel basis type and easy interoperability with Python, Qwerty is a powerful framework for high-level quantum-classical computation.
Submitted 18 April, 2024; originally announced April 2024.
Comments: 30 pages, 27 figures
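
To see the gate-level reasoning Qwerty aims to abstract away, here is plain numpy (deliberately not Qwerty syntax): preparing even a simple entangled state means hand-selecting gates.

```python
import numpy as np

# Explicit gate selection: build a Bell state with H and CNOT by hand.
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
I2 = np.eye(2)
CNOT = np.array([[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]])

state = np.zeros(4); state[0] = 1.0    # |00>
state = CNOT @ np.kron(H, I2) @ state  # H on qubit 0, then CNOT
print(state)                           # ~[0.707, 0, 0, 0.707]
```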

6. arXiv:2403.13318 [pdf, other] cs.RO cs.HC
Workload Estimation for Unknown Tasks: A Survey of Machine Learning Under Distribution Shift
Authors: Josh Bhagat Smith, Julie A. Adams
Abstract: Human-robot teams involve humans and robots collaborating to achieve tasks under various environmental conditions. Successful teaming will require robots to adapt autonomously to a human teammate's internal state. An important element of such adaptation is the ability to estimate the human teammate's workload in unknown situations. Existing workload models use machine learning to model the relationships between physiological metrics and workload; however, these methods are susceptible to individual differences and are heavily influenced by other factors. These methods cannot generalize to unknown tasks, as they rely on standard machine learning approaches that assume data consist of independent and identically distributed (IID) samples. This assumption does not necessarily hold when estimating workload for new tasks. A survey of non-IID machine learning techniques is presented, in which commonly used techniques are evaluated against three criteria: portability, model complexity, and adaptability. These criteria are used to argue which techniques are most applicable for estimating workload for unknown tasks in dynamic, real-time environments.
Submitted 20 March, 2024; originally announced March 2024.
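
One family of non-IID techniques such a survey covers is covariate-shift correction. A standard importance-weighting sketch on synthetic data (illustrative; not a method from the paper itself):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

# Covariate-shift correction: a classifier separating training data from
# (unlabeled) new-task data yields density ratios used as sample weights.
rng = np.random.default_rng(0)
X_train = rng.normal(0.0, 1.0, (500, 4))   # physiological features, old task
X_target = rng.normal(0.5, 1.0, (500, 4))  # unlabeled features, new task

clf = LogisticRegression().fit(
    np.vstack([X_train, X_target]),
    np.r_[np.zeros(500), np.ones(500)],
)
p = clf.predict_proba(X_train)[:, 1]
weights = p / (1 - p)  # importance weights ~ p_target(x) / p_train(x)
print(weights[:5])     # pass as sample_weight when fitting the workload model
```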

7. arXiv:2403.12290 [pdf, other] eess.IV cs.CV
Estimation and Analysis of Slice Propagation Uncertainty in 3D Anatomy Segmentation
Authors: Rachaell Nihalaani, Tushar Kataria, Jadie Adams, Shireen Y. Elhabian
Abstract: Supervised methods for 3D anatomy segmentation demonstrate superior performance but are often limited by the availability of annotated data. This limitation has led to a growing interest in self-supervised approaches, in tandem with the abundance of available un-annotated data. Slice propagation has emerged as a self-supervised approach that leverages slice registration as a self-supervised task to achieve full anatomy segmentation with minimal supervision. This approach significantly reduces the need for domain expertise, time, and the cost associated with building fully annotated datasets required for training segmentation networks. However, this shift toward reduced supervision via deterministic networks raises concerns about the trustworthiness and reliability of predictions, especially when compared with more accurate supervised approaches. To address this concern, we propose integrating calibrated uncertainty quantification (UQ) into slice propagation methods, providing insight into the model's predictive reliability and confidence levels. Incorporating uncertainty measures enhances user confidence in self-supervised approaches, thereby improving their practical applicability. We conducted experiments on three datasets for 3D abdominal segmentation using five UQ methods. The results illustrate that incorporating UQ improves not only model trustworthiness but also segmentation accuracy. Furthermore, our analysis reveals various failure modes of slice propagation methods that might not be immediately apparent to end-users. This study opens up new research avenues to improve the accuracy and trustworthiness of slice propagation methods.
Submitted 8 July, 2024; v1 submitted 18 March, 2024; originally announced March 2024.
Comments: 13 pages including Supplementary, 4 figures

8. arXiv:2403.01975 [pdf, other] cs.DB
OCEL (Object-Centric Event Log) 2.0 Specification
Authors: Alessandro Berti, Istvan Koren, Jan Niklas Adams, Gyunam Park, Benedikt Knopp, Nina Graves, Majid Rafiei, Lukas Liß, Leah Tacke Genannt Unterberg, Yisong Zhang, Christopher Schwanen, Marco Pegoraro, Wil M. P. van der Aalst
Abstract: Object-Centric Event Logs (OCELs) form the basis for Object-Centric Process Mining (OCPM). OCEL 1.0 was first released in 2020 and triggered the development of a range of OCPM techniques. OCEL 2.0 forms the new, more expressive standard, allowing for more extensive process analyses while remaining in an easily exchangeable format. In contrast to the first OCEL standard, it can depict changes in objects, provide information on object relationships, and qualify these relationships to other objects or specific events. Compared to XES, it is more expressive, less complicated, and more readable. OCEL 2.0 offers three exchange formats: a relational database (SQLite), XML, and JSON. This OCEL 2.0 specification document provides an introduction to the standard, its metamodel, and its exchange formats, aimed at practitioners and researchers alike.
Submitted 4 March, 2024; originally announced March 2024.
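
For a feel of the JSON exchange format, here is a single object-centric event with qualified object relationships (field names simplified; consult the specification for the exact schema):

```python
import json

# A minimal object-centric event in the spirit of OCEL 2.0's JSON format.
# Note the qualified relationship from the event to each object.
event = {
    "id": "e1",
    "type": "place_order",
    "time": "2024-03-04T10:00:00Z",
    "attributes": [{"name": "channel", "value": "web"}],
    "relationships": [
        {"objectId": "order-17", "qualifier": "created"},
        {"objectId": "customer-3", "qualifier": "placed by"},
    ],
}
print(json.dumps(event, indent=2))
```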

9. arXiv:2401.15484 [pdf, other] cs.RO
R×R: Rapid eXploration for Reinforcement Learning via Sampling-based Reset Distributions and Imitation Pre-training
Authors: Gagan Khandate, Tristan L. Saidi, Siqi Shang, Eric T. Chang, Yang Liu, Seth Dennis, Johnson Adams, Matei Ciocarlie
Abstract: We present a method for enabling Reinforcement Learning of motor control policies for complex skills such as dexterous manipulation. We posit that a key difficulty for training such policies is the difficulty of exploring the problem state space, as the accessible and useful regions of this space form a complex structure along manifolds of the original high-dimensional state space. This work presents a method to enable and support exploration with Sampling-based Planning. We use a generally applicable non-holonomic Rapidly-exploring Random Trees algorithm and present multiple methods to use the resulting structure to bootstrap model-free Reinforcement Learning. Our method is effective at learning various challenging dexterous motor control skills of higher difficulty than previously shown. In particular, we achieve dexterous in-hand manipulation of complex objects while simultaneously securing the object without the use of passive support surfaces. These policies also transfer effectively to real robots. A number of example videos can be found on the project website: https://sbrl.cs.columbia.edu
Submitted 27 January, 2024; originally announced January 2024.
Comments: 20 pages, 14 figures, submitted to Autonomous Robots, RSS 2023 Special Issue. arXiv admin note: substantial text overlap with arXiv:2303.03486
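
One way a planner's tree can bootstrap RL, per the title's "reset distributions", is to start episodes from states the planner already reached. A toy sketch with placeholder states (the function and parameters are hypothetical):

```python
import random

# States collected by an RRT-style exploration tree (placeholders here).
tree_states = [(0.1, 0.2), (0.4, 0.1), (0.3, 0.7)]

def sample_reset(default_state, p_tree=0.7):
    """Reset to a planner-visited state most of the time, else the start."""
    if tree_states and random.random() < p_tree:
        return random.choice(tree_states)
    return default_state

print(sample_reset((0.0, 0.0)))
```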

10. arXiv:2310.12480 [pdf, other] cs.MA cs.AI
GRAPE-S: Near Real-Time Coalition Formation for Multiple Service Collectives
Authors: Grace Diehl, Julie A. Adams
Abstract: Robotic collectives for military and disaster response applications require coalition formation algorithms to partition robots into appropriate task teams. Collectives' missions will often incorporate tasks that require multiple high-level robot behaviors or services, which coalition formation must accommodate. The highly dynamic and unstructured application domains also necessitate that coalition formation algorithms produce near-optimal solutions (i.e., >95% utility) in near real-time (i.e., <5 minutes) with very large collectives (i.e., hundreds of robots). No previous coalition formation algorithm satisfies these requirements. An initial evaluation found that traditional auction-based algorithms' runtimes are too long, even though the centralized simulator incorporated ideal conditions unlikely to occur in real-world deployments (i.e., synchronization across robots and perfect, instantaneous communication). The hedonic game-based GRAPE algorithm can produce solutions in near real-time, but cannot be applied to multiple-service collectives. This manuscript integrates GRAPE and a services model, producing GRAPE-S and Pair-GRAPE-S. These algorithms and two auction baselines were evaluated using a centralized simulator with up to 1000 robots, and via the largest distributed coalition formation simulated evaluation to date, with up to 500 robots. The evaluations demonstrate that auctions transfer poorly to distributed collectives, resulting in excessive runtimes and low-utility solutions. GRAPE-S satisfies the target domains' coalition formation requirements, producing near-optimal solutions in near real-time, and Pair-GRAPE-S more than satisfies the domain requirements, producing optimal solutions in near real-time. GRAPE-S and Pair-GRAPE-S are the first algorithms demonstrated to support near real-time coalition formation for very large, distributed collectives with multiple services.
Submitted 19 October, 2023; originally announced October 2023.
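
For scale, coalition formation with services means partitioning robots by the service counts each task requires. A toy greedy assignment for illustration only (GRAPE-S itself is hedonic-game-based, not this baseline):

```python
import random

random.seed(0)
# Each task needs counts of services; each robot offers one service.
tasks = {"search": {"uav": 2, "ugv": 1}, "deliver": {"ugv": 2}}
robots = [("r%d" % i, random.choice(["uav", "ugv"])) for i in range(6)]

coalitions = {t: [] for t in tasks}
for name, service in robots:
    for task, need in tasks.items():
        assigned = sum(1 for _, s in coalitions[task] if s == service)
        if assigned < need.get(service, 0):  # room for this service?
            coalitions[task].append((name, service))
            break
print(coalitions)
```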
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.11332">arXiv:2310.11332</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.11332">pdf</a>, <a href="https://arxiv.org/format/2310.11332">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> </div> <p class="title is-5 mathjax"> Discovering High-Quality Process Models Despite Data Scarcity </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+N">Jan Niklas Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Peeperkorn%2C+J">Jari Peeperkorn</a>, <a href="/search/cs?searchtype=author&amp;query=Brockhoff%2C+T">Tobias Brockhoff</a>, <a href="/search/cs?searchtype=author&amp;query=Terrier%2C+I">Isabelle Terrier</a>, <a href="/search/cs?searchtype=author&amp;query=G%C3%B6hner%2C+H">Heiko G枚hner</a>, <a href="/search/cs?searchtype=author&amp;query=Uysal%2C+M+S">Merih Seran Uysal</a>, <a href="/search/cs?searchtype=author&amp;query=Broucke%2C+S+v">Seppe vanden Broucke</a>, <a href="/search/cs?searchtype=author&amp;query=De+Weerdt%2C+J">Jochen De Weerdt</a>, <a href="/search/cs?searchtype=author&amp;query=van+der+Aalst%2C+W+M+P">Wil M. P. van der Aalst</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.11332v1-abstract-short" style="display: inline;"> Process discovery algorithms learn process models from executed activity sequences, describing concurrency, causality, and conflict. Concurrent activities require observing multiple permutations, increasing data requirements, especially for processes with concurrent subprocesses such as hierarchical, composite, or distributed processes. While process discovery algorithms traditionally use sequence&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.11332v1-abstract-full').style.display = 'inline'; document.getElementById('2310.11332v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.11332v1-abstract-full" style="display: none;"> Process discovery algorithms learn process models from executed activity sequences, describing concurrency, causality, and conflict. Concurrent activities require observing multiple permutations, increasing data requirements, especially for processes with concurrent subprocesses such as hierarchical, composite, or distributed processes. While process discovery algorithms traditionally use sequences of activities as input, recently introduced object-centric process discovery algorithms can use graphs of activities as input, encoding partial orders between activities. As such, they contain the concurrency information of many sequences in a single graph. In this paper, we address the research question of reducing process discovery data requirements when using object-centric event logs for process discovery. We classify different real-life processes according to the control-flow complexity within and between subprocesses and introduce an evaluation framework to assess process discovery algorithm quality of traditional and object-centric process discovery based on the sample size. We complement this with a large-scale production process case study. 
Our results show reduced data requirements, enabling the discovery of large, concurrent processes such as manufacturing with little data, previously infeasible with traditional process discovery. Our findings suggest that object-centric process mining could revolutionize process discovery in various sectors, including manufacturing and supply chains. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.11332v1-abstract-full').style.display = 'none'; document.getElementById('2310.11332v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.01529">arXiv:2310.01529</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.01529">pdf</a>, <a href="https://arxiv.org/format/2310.01529">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Progressive DeepSSM: Training Methodology for Image-To-Shape Deep Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Aziz%2C+A+Z+B">Abu Zahid Bin Aziz</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jadie Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Elhabian%2C+S">Shireen Elhabian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.01529v1-abstract-short" style="display: inline;"> Statistical shape modeling (SSM) is an enabling quantitative tool to study anatomical shapes in various medical applications. However, directly using 3D images in these applications still has a long way to go. Recent deep learning methods have paved the way for reducing the substantial preprocessing steps to construct SSMs directly from unsegmented images. Nevertheless, the performance of these mo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.01529v1-abstract-full').style.display = 'inline'; document.getElementById('2310.01529v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.01529v1-abstract-full" style="display: none;"> Statistical shape modeling (SSM) is an enabling quantitative tool to study anatomical shapes in various medical applications. However, directly using 3D images in these applications still has a long way to go. Recent deep learning methods have paved the way for reducing the substantial preprocessing steps to construct SSMs directly from unsegmented images. Nevertheless, the performance of these models is not up to the mark. Inspired by multiscale/multiresolution learning, we propose a new training strategy, progressive DeepSSM, to train image-to-shape deep learning models. The training is performed in multiple scales, and each scale utilizes the output from the previous scale. This strategy enables the model to learn coarse shape features in the first scales and gradually learn detailed fine shape features in the later scales. 
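
The data-requirement argument above comes down to counting interleavings: k fully concurrent activities can be observed in k! different orders, while a single partial-order graph encodes them all. A two-line check:

```python
from math import factorial

# Why concurrency inflates data needs for sequence-based discovery.
for k in range(2, 7):
    print(k, "concurrent activities ->", factorial(k), "possible interleavings")
```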
We leverage shape priors via segmentation-guided multi-task learning and employ deep supervision loss to ensure learning at each scale. Experiments show the superiority of models trained by the proposed strategy from both quantitative and qualitative perspectives. This training methodology can be employed to improve the stability and accuracy of any deep learning method for inferring statistical representations of anatomies from medical images and can be adopted by existing deep learning methods to improve model accuracy and training stability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.01529v1-abstract-full').style.display = 'none'; document.getElementById('2310.01529v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in ShapeMI MICCAI 2023: Workshop on Shape in Medical Imaging</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.14089">arXiv:2308.14089</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.14089">pdf</a>, <a href="https://arxiv.org/format/2308.14089">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MedAlign: A Clinician-Generated Dataset for Instruction Following with Electronic Medical Records </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Fleming%2C+S+L">Scott L. Fleming</a>, <a href="/search/cs?searchtype=author&amp;query=Lozano%2C+A">Alejandro Lozano</a>, <a href="/search/cs?searchtype=author&amp;query=Haberkorn%2C+W+J">William J. Haberkorn</a>, <a href="/search/cs?searchtype=author&amp;query=Jindal%2C+J+A">Jenelle A. Jindal</a>, <a href="/search/cs?searchtype=author&amp;query=Reis%2C+E+P">Eduardo P. Reis</a>, <a href="/search/cs?searchtype=author&amp;query=Thapa%2C+R">Rahul Thapa</a>, <a href="/search/cs?searchtype=author&amp;query=Blankemeier%2C+L">Louis Blankemeier</a>, <a href="/search/cs?searchtype=author&amp;query=Genkins%2C+J+Z">Julian Z. Genkins</a>, <a href="/search/cs?searchtype=author&amp;query=Steinberg%2C+E">Ethan Steinberg</a>, <a href="/search/cs?searchtype=author&amp;query=Nayak%2C+A">Ashwin Nayak</a>, <a href="/search/cs?searchtype=author&amp;query=Patel%2C+B+S">Birju S. Patel</a>, <a href="/search/cs?searchtype=author&amp;query=Chiang%2C+C">Chia-Chun Chiang</a>, <a href="/search/cs?searchtype=author&amp;query=Callahan%2C+A">Alison Callahan</a>, <a href="/search/cs?searchtype=author&amp;query=Huo%2C+Z">Zepeng Huo</a>, <a href="/search/cs?searchtype=author&amp;query=Gatidis%2C+S">Sergios Gatidis</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+S+J">Scott J. 
Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Fayanju%2C+O">Oluseyi Fayanju</a>, <a href="/search/cs?searchtype=author&amp;query=Shah%2C+S+J">Shreya J. Shah</a>, <a href="/search/cs?searchtype=author&amp;query=Savage%2C+T">Thomas Savage</a>, <a href="/search/cs?searchtype=author&amp;query=Goh%2C+E">Ethan Goh</a>, <a href="/search/cs?searchtype=author&amp;query=Chaudhari%2C+A+S">Akshay S. Chaudhari</a>, <a href="/search/cs?searchtype=author&amp;query=Aghaeepour%2C+N">Nima Aghaeepour</a>, <a href="/search/cs?searchtype=author&amp;query=Sharp%2C+C">Christopher Sharp</a>, <a href="/search/cs?searchtype=author&amp;query=Pfeffer%2C+M+A">Michael A. Pfeffer</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+P">Percy Liang</a> , et al. (5 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.14089v2-abstract-short" style="display: inline;"> The ability of large language models (LLMs) to follow natural language instructions with human-level fluency suggests many opportunities in healthcare to reduce administrative burden and improve quality of care. However, evaluating LLMs on realistic text generation tasks for healthcare remains challenging. Existing question answering datasets for electronic health record (EHR) data fail to capture&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.14089v2-abstract-full').style.display = 'inline'; document.getElementById('2308.14089v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.14089v2-abstract-full" style="display: none;"> The ability of large language models (LLMs) to follow natural language instructions with human-level fluency suggests many opportunities in healthcare to reduce administrative burden and improve quality of care. However, evaluating LLMs on realistic text generation tasks for healthcare remains challenging. Existing question answering datasets for electronic health record (EHR) data fail to capture the complexity of information needs and documentation burdens experienced by clinicians. To address these challenges, we introduce MedAlign, a benchmark dataset of 983 natural language instructions for EHR data. MedAlign is curated by 15 clinicians (7 specialities), includes clinician-written reference responses for 303 instructions, and provides 276 longitudinal EHRs for grounding instruction-response pairs. We used MedAlign to evaluate 6 general domain LLMs, having clinicians rank the accuracy and quality of each LLM response. We found high error rates, ranging from 35% (GPT-4) to 68% (MPT-7B-Instruct), and an 8.3% drop in accuracy moving from 32k to 2k context lengths for GPT-4. Finally, we report correlations between clinician rankings and automated natural language generation metrics as a way to rank LLMs without human review. We make MedAlign available under a research data use agreement to enable LLM evaluations on tasks aligned with clinician needs and preferences. 
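<p>The correlation step above reduces to a small computation: rank the LLMs by an automated metric and check agreement with the clinician ranking. A minimal sketch follows; the scores, model count, and the choice of Kendall's tau are illustrative assumptions, not details taken from the paper.</p> <pre><code>
# Hedged sketch: agreement between human rankings of LLM outputs and an
# automated NLG metric. All numbers below are made-up placeholders.
from scipy.stats import kendalltau

clinician_rank = [1, 2, 3, 4, 5, 6]                  # 1 = best, per clinicians
metric_score = [0.78, 0.74, 0.75, 0.61, 0.58, 0.40]  # higher = better

# Negate the scores so both sequences order the models "best first".
tau, p_value = kendalltau(clinician_rank, [-s for s in metric_score])
print(f"Kendall tau = {tau:.2f} (p = {p_value:.3f})")
</code></pre>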
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.14089v2-abstract-full').style.display = 'none'; document.getElementById('2308.14089v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.07878">arXiv:2308.07878</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.07878">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> The $10 Million ANA Avatar XPRIZE Competition Advanced Immersive Telepresence Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Behnke%2C+S">Sven Behnke</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+A">Julie A. Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Locke%2C+D">David Locke</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.07878v1-abstract-short" style="display: inline;"> The $10M ANA Avatar XPRIZE aimed to create avatar systems that can transport human presence to remote locations in real time. The participants of this multi-year competition developed robotic systems that allow operators to see, hear, and interact with a remote environment in a way that feels as if they are truly there. On the other hand, people in the remote environment were given the impression&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.07878v1-abstract-full').style.display = 'inline'; document.getElementById('2308.07878v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.07878v1-abstract-full" style="display: none;"> The $10M ANA Avatar XPRIZE aimed to create avatar systems that can transport human presence to remote locations in real time. The participants of this multi-year competition developed robotic systems that allow operators to see, hear, and interact with a remote environment in a way that feels as if they are truly there. On the other hand, people in the remote environment were given the impression that the operator was present inside the avatar robot. At the competition finals, held in November 2022 in Long Beach, CA, USA, the avatar systems were evaluated on their support for remotely interacting with humans, exploring new environments, and employing specialized skills. This article describes the competition stages with tasks and evaluation procedures, reports the results, presents the winning teams&#39; approaches, and discusses lessons learned. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.07878v1-abstract-full').style.display = 'none'; document.getElementById('2308.07878v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extended version of article accepted for competitions column</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Robotics and Automation Magazine, 2023 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.07506">arXiv:2308.07506</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.07506">pdf</a>, <a href="https://arxiv.org/format/2308.07506">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Benchmarking Scalable Epistemic Uncertainty Quantification in Organ Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jadie Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Elhabian%2C+S+Y">Shireen Y. Elhabian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.07506v1-abstract-short" style="display: inline;"> Deep learning based methods for automatic organ segmentation have shown promise in aiding diagnosis and treatment planning. However, quantifying and understanding the uncertainty associated with model predictions is crucial in critical clinical applications. While many techniques have been proposed for epistemic or model-based uncertainty estimation, it is unclear which method is preferred in the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.07506v1-abstract-full').style.display = 'inline'; document.getElementById('2308.07506v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.07506v1-abstract-full" style="display: none;"> Deep learning based methods for automatic organ segmentation have shown promise in aiding diagnosis and treatment planning. However, quantifying and understanding the uncertainty associated with model predictions is crucial in critical clinical applications. While many techniques have been proposed for epistemic or model-based uncertainty estimation, it is unclear which method is preferred in the medical image analysis setting. This paper presents a comprehensive benchmarking study that evaluates epistemic uncertainty quantification methods in organ segmentation in terms of accuracy, uncertainty calibration, and scalability. 
We provide a comprehensive discussion of the strengths, weaknesses, and out-of-distribution detection capabilities of each method as well as recommendations for future improvements. These findings contribute to the development of reliable and robust models that yield accurate segmentations while effectively quantifying epistemic uncertainty. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.07506v1-abstract-full').style.display = 'none'; document.getElementById('2308.07506v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to the UNSURE Workshop held in conjunction with MICCAI 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.06293">arXiv:2308.06293</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.06293">pdf</a>, <a href="https://arxiv.org/format/2308.06293">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Target Detection on Hyperspectral Images Using MCMC and VI Trained Bayesian Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ries%2C+D">Daniel Ries</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jason Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Zollweg%2C+J">Joshua Zollweg</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.06293v1-abstract-short" style="display: inline;"> Neural networks (NN) have become almost ubiquitous with image classification, but in their standard form produce point estimates, with no measure of confidence. Bayesian neural networks (BNN) provide uncertainty quantification (UQ) for NN predictions and estimates through the posterior distribution. As NN are applied in more high-consequence applications, UQ is becoming a requirement. BNN provide&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.06293v1-abstract-full').style.display = 'inline'; document.getElementById('2308.06293v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.06293v1-abstract-full" style="display: none;"> Neural networks (NN) have become almost ubiquitous with image classification, but in their standard form produce point estimates, with no measure of confidence. Bayesian neural networks (BNN) provide uncertainty quantification (UQ) for NN predictions and estimates through the posterior distribution. As NN are applied in more high-consequence applications, UQ is becoming a requirement. 
BNN provide a solution to this problem by not only giving accurate predictions and estimates, but also an interval that includes reasonable values within a desired probability. Despite their positive attributes, BNN are notoriously difficult and time consuming to train. Traditional Bayesian methods use Markov Chain Monte Carlo (MCMC), but this is often brushed aside as being too slow. The most common method is variational inference (VI) due to its fast computation, but there are multiple concerns with its efficacy. We apply and compare MCMC- and VI-trained BNN in the context of target detection in hyperspectral imagery (HSI), where materials of interest can be identified by their unique spectral signature. This is a challenging field, due to the numerous permuting effects practical collection of HSI has on measured spectra. Both models are trained using out-of-the-box tools on a high fidelity HSI target detection scene. Both MCMC- and VI-trained BNN perform well overall at target detection on a simulated HSI scene. This paper provides an example of how to utilize the benefits of UQ, but also to increase awareness that different training methods can give different results for the same model. If sufficient computational resources are available, the best approach rather than the fastest or most efficient should be used, especially for high consequence problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.06293v1-abstract-full').style.display = 'none'; document.getElementById('2308.06293v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.05903">arXiv:2308.05903</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.05903">pdf</a>, <a href="https://arxiv.org/format/2308.05903">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Comparing the quality of neural network uncertainty estimates for classification problems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ries%2C+D">Daniel Ries</a>, <a href="/search/cs?searchtype=author&amp;query=Michalenko%2C+J">Joshua Michalenko</a>, <a href="/search/cs?searchtype=author&amp;query=Ganter%2C+T">Tyler Ganter</a>, <a href="/search/cs?searchtype=author&amp;query=Baiyasi%2C+R+I">Rashad Imad-Fayez Baiyasi</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jason Adams</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.05903v1-abstract-short" style="display: inline;"> Traditional deep learning (DL) models are powerful classifiers, but many approaches do not provide uncertainties for their estimates. 
Uncertainty quantification (UQ) methods for DL models have received increased attention in the literature due to their usefulness in decision making, particularly for high-consequence decisions. However, there has been little research done on how to evaluate the qua&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.05903v1-abstract-full').style.display = 'inline'; document.getElementById('2308.05903v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.05903v1-abstract-full" style="display: none;"> Traditional deep learning (DL) models are powerful classifiers, but many approaches do not provide uncertainties for their estimates. Uncertainty quantification (UQ) methods for DL models have received increased attention in the literature due to their usefulness in decision making, particularly for high-consequence decisions. However, there has been little research done on how to evaluate the quality of such methods. We use statistical methods of frequentist interval coverage and interval width to evaluate the quality of credible intervals, and expected calibration error to evaluate classification predicted confidence. These metrics are evaluated on Bayesian neural networks (BNN) fit using Markov Chain Monte Carlo (MCMC) and variational inference (VI), bootstrapped neural networks (NN), Deep Ensembles (DE), and Monte Carlo (MC) dropout. We apply these different UQ for DL methods to a hyperspectral image target detection problem and show the inconsistency of the different methods&#39; results and the necessity of a UQ quality metric. To reconcile these differences and choose a UQ method that appropriately quantifies the uncertainty, we create a simulated data set with fully parameterized probability distribution for a two-class classification problem. The gold standard MCMC performs the best overall, and the bootstrapped NN is a close second, requiring the same computational expense as DE. Through this comparison, we demonstrate that, for a given data set, different models can produce uncertainty estimates of markedly different quality. This in turn points to a great need for principled assessment methods of UQ quality in DL applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.05903v1-abstract-full').style.display = 'none'; document.getElementById('2308.05903v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
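<p>Two of the quality metrics named in this abstract have compact definitions; the sketch below gives expected calibration error and frequentist interval coverage in NumPy, on synthetic inputs (illustrative only, not the paper's evaluation code).</p> <pre><code>
# Hedged sketch of two UQ quality metrics: expected calibration error
# (ECE) for predicted confidence and coverage for credible intervals.
import numpy as np

def expected_calibration_error(conf, correct, n_bins=10):
    """Bin-weighted mean gap between confidence and accuracy."""
    conf = np.asarray(conf)
    correct = np.asarray(correct, dtype=float)
    bins = np.minimum((conf * n_bins).astype(int), n_bins - 1)
    ece = 0.0
    for b in range(n_bins):
        mask = bins == b
        if mask.any():
            ece += mask.mean() * abs(conf[mask].mean() - correct[mask].mean())
    return ece

def interval_coverage(lower, upper, truth):
    """Fraction of true values falling inside their intervals."""
    lower, upper, truth = map(np.asarray, (lower, upper, truth))
    return ((lower <= truth) & (truth <= upper)).mean()

# Perfectly calibrated toy data should give an ECE near zero.
rng = np.random.default_rng(0)
conf = rng.uniform(0.5, 1.0, size=10_000)
correct = rng.uniform(size=10_000) <= conf   # correct with prob = confidence
print(round(expected_calibration_error(conf, correct), 3))
</code></pre>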
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.00102">arXiv:2308.00102</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.00102">pdf</a>, <a href="https://arxiv.org/format/2308.00102">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.55417/fr.2023026">10.55417/fr.2023026 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Can A Single Human Supervise A Swarm of 100 Heterogeneous Robots? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+A">Julie A. Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Hamell%2C+J">Joshua Hamell</a>, <a href="/search/cs?searchtype=author&amp;query=Walker%2C+P">Phillip Walker</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.00102v1-abstract-short" style="display: inline;"> An open research question has been whether a single human can supervise a true heterogeneous swarm of robots completing tasks in real world environments. A general concern is whether or not the human&#39;s workload will be taxed to the breaking point. The Defense Advanced Research Projects Agency&#39;s OFFensive Swarm-Enabled Tactics program&#39;s field exercises that occurred at U.S. Army urban training sit&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.00102v1-abstract-full').style.display = 'inline'; document.getElementById('2308.00102v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.00102v1-abstract-full" style="display: none;"> An open research question has been whether a single human can supervise a true heterogeneous swarm of robots completing tasks in real world environments. A general concern is whether or not the human&#39;s workload will be taxed to the breaking point. The Defense Advanced Research Projects Agency&#39;s OFFensive Swarm-Enabled Tactics program&#39;s field exercises that occurred at U.S. Army urban training sites provided the opportunity to understand the impact of achieving such swarm deployments. The Command and Control of Aggregate Swarm Tactics integrator team&#39;s swarm commander uses the heterogeneous robot swarm to conduct relevant missions. During the final OFFSET program field exercise, the team collected objective and subjective metrics related to the swarm commander&#39;s human performance. A multi-dimensional workload algorithm that estimates overall workload based on five components of workload was used to analyze the results.
While the swarm commander&#39;s workload estimate did cross the overload threshold frequently, the swarm commander was able to successfully complete the missions, often under challenging operational conditions. The presented results demonstrate that a single human can deploy a swarm of 100 heterogeneous robots to conduct real-world missions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.00102v1-abstract-full').style.display = 'none'; document.getElementById('2308.00102v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">48 pages, 9 figures, 15 tables, 2 appendices, Accepted Field Robotics, Special Issue: Dynamic Large-Scale Swarm Systems in Urban Environments: Results from the DARPA OFFSET Program, Accepted April 2023</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Field Robotics, 3:837-881, 2023 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.16788">arXiv:2307.16788</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.16788">pdf</a>, <a href="https://arxiv.org/format/2307.16788">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.55417/fr.2023005">10.55417/fr.2023005 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Congestion Analysis for the DARPA OFFSET CCAST Swarm </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Brown%2C+R">Robert Brown</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+A">Julie A. Adams</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.16788v1-abstract-short" style="display: inline;"> The Defense Advanced Research Projects Agency (DARPA) OFFensive Swarm-Enabled Tactics program&#39;s goal of launching 250 unmanned aerial and ground vehicles from a limited sized launch zone was a daunting challenge. The swarm&#39;s aerial vehicles were primarily multirotor platforms, which can efficiently be launched en masse. Each field exercise expected the deployment of an even larger swarm. 
While the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.16788v1-abstract-full').style.display = 'inline'; document.getElementById('2307.16788v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.16788v1-abstract-full" style="display: none;"> The Defense Advanced Research Projects Agency (DARPA) OFFensive Swarm-Enabled Tactics program&#39;s goal of launching 250 unmanned aerial and ground vehicles from a limited sized launch zone was a daunting challenge. The swarm&#39;s aerial vehicles were primarily multirotor platforms, which can efficiently be launched en masse. Each field exercise expected the deployment of an even larger swarm. While the launch zone&#39;s spatial area increased with each field exercise, the relative space for each vehicle was not necessarily increased, considering the increasing size of the swarm and the vehicles&#39; associated GPS error; however, safe mission deployment and execution were expected. At the same time, achieving the mission goals required maximizing efficiency of the swarm&#39;s performance by reducing congestion that blocked vehicles from completing tactic assignments. Congestion analysis conducted before the final field exercise focused on adjusting various constraints to optimize the swarm&#39;s deployment without reducing safety. During the field exercise, data was collected that permitted analyzing the number and durations of individual vehicle blockages&#39; impact on the resulting congestion. After the field exercise, additional analyses used the mission plan to validate the use of simulation for analyzing congestion. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.16788v1-abstract-full').style.display = 'none'; document.getElementById('2307.16788v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">32 pages, 15 figures with multiple subfigures, Field Robotics Journal, published</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Field Robotics, Special Issue: Dynamic Large-Scale Swarm Systems in Urban Environments: Results from the DARPA OFFSET Program, 3: 190-221, 2023 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.04427">arXiv:2307.04427</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.04427">pdf</a>, <a href="https://arxiv.org/format/2307.04427">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Astrophysical Phenomena">astro-ph.HE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1126/science.adc9818">10.1126/science.adc9818 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Observation of high-energy neutrinos from the Galactic plane </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Abbasi%2C+R">R. Abbasi</a>, <a href="/search/cs?searchtype=author&amp;query=Ackermann%2C+M">M. Ackermann</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">J. Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Aguilar%2C+J+A">J. A. Aguilar</a>, <a href="/search/cs?searchtype=author&amp;query=Ahlers%2C+M">M. Ahlers</a>, <a href="/search/cs?searchtype=author&amp;query=Ahrens%2C+M">M. Ahrens</a>, <a href="/search/cs?searchtype=author&amp;query=Alameddine%2C+J+M">J. M. Alameddine</a>, <a href="/search/cs?searchtype=author&amp;query=Alves%2C+A+A">A. A. Alves Jr.</a>, <a href="/search/cs?searchtype=author&amp;query=Amin%2C+N+M">N. M. Amin</a>, <a href="/search/cs?searchtype=author&amp;query=Andeen%2C+K">K. Andeen</a>, <a href="/search/cs?searchtype=author&amp;query=Anderson%2C+T">T. Anderson</a>, <a href="/search/cs?searchtype=author&amp;query=Anton%2C+G">G. Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Arg%C3%BCelles%2C+C">C. Arg眉elles</a>, <a href="/search/cs?searchtype=author&amp;query=Ashida%2C+Y">Y. Ashida</a>, <a href="/search/cs?searchtype=author&amp;query=Athanasiadou%2C+S">S. Athanasiadou</a>, <a href="/search/cs?searchtype=author&amp;query=Axani%2C+S">S. Axani</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+X">X. Bai</a>, <a href="/search/cs?searchtype=author&amp;query=V.%2C+A+B">A. Balagopal V.</a>, <a href="/search/cs?searchtype=author&amp;query=Barwick%2C+S+W">S. W. Barwick</a>, <a href="/search/cs?searchtype=author&amp;query=Basu%2C+V">V. Basu</a>, <a href="/search/cs?searchtype=author&amp;query=Baur%2C+S">S. Baur</a>, <a href="/search/cs?searchtype=author&amp;query=Bay%2C+R">R. Bay</a>, <a href="/search/cs?searchtype=author&amp;query=Beatty%2C+J+J">J. J. 
Beatty</a>, <a href="/search/cs?searchtype=author&amp;query=Becker%2C+K+-">K. -H. Becker</a>, <a href="/search/cs?searchtype=author&amp;query=Tjus%2C+J+B">J. Becker Tjus</a> , et al. (364 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.04427v1-abstract-short" style="display: inline;"> The origin of high-energy cosmic rays, atomic nuclei that continuously impact Earth&#39;s atmosphere, has been a mystery for over a century. Due to deflection in interstellar magnetic fields, cosmic rays from the Milky Way arrive at Earth from random directions. However, near their sources and during propagation, cosmic rays interact with matter and produce high-energy neutrinos. We search for neutrin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.04427v1-abstract-full').style.display = 'inline'; document.getElementById('2307.04427v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.04427v1-abstract-full" style="display: none;"> The origin of high-energy cosmic rays, atomic nuclei that continuously impact Earth&#39;s atmosphere, has been a mystery for over a century. Due to deflection in interstellar magnetic fields, cosmic rays from the Milky Way arrive at Earth from random directions. However, near their sources and during propagation, cosmic rays interact with matter and produce high-energy neutrinos. We search for neutrino emission using machine learning techniques applied to ten years of data from the IceCube Neutrino Observatory. We identify neutrino emission from the Galactic plane at the 4.5$\sigma$ level of significance, by comparing diffuse emission models to a background-only hypothesis. The signal is consistent with modeled diffuse emission from the Galactic plane, but could also arise from a population of unresolved point sources. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.04427v1-abstract-full').style.display = 'none'; document.getElementById('2307.04427v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023.
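<p>For context on the quoted 4.5$\sigma$: significance stated in Gaussian sigma corresponds, by the usual convention, to a one-sided tail probability of the standard normal distribution. The sketch below shows only that conversion, not the collaboration's analysis chain.</p> <pre><code>
# Standard sigma/p-value conversion (convention only, not IceCube code).
from scipy.stats import norm

sigma = 4.5
p = norm.sf(sigma)                        # one-sided tail probability
print(f"{sigma} sigma -> p = {p:.2e}")    # about 3.4e-06

print(f"p = {p:.2e} -> {norm.isf(p):.2f} sigma")   # inverts back to 4.5
</code></pre>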
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted on May 12th, 2022; Accepted on May 4th, 2023</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Science 380, 6652, 1338-1343 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.05590">arXiv:2306.05590</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.05590">pdf</a>, <a href="https://arxiv.org/format/2306.05590">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> The Viability of Domain Constrained Coalition Formation for Robotic Collectives </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Diehl%2C+G">Grace Diehl</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+A">Julie A. Adams</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.05590v1-abstract-short" style="display: inline;"> Applications, such as military and disaster response, can benefit from robotic collectives&#39; ability to perform multiple cooperative tasks (e.g., surveillance, damage assessments) efficiently across a large spatial area. Coalition formation algorithms can potentially facilitate collective robots&#39; assignment to appropriate task teams; however, most coalition formation algorithms were designed for sm&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.05590v1-abstract-full').style.display = 'inline'; document.getElementById('2306.05590v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.05590v1-abstract-full" style="display: none;"> Applications, such as military and disaster response, can benefit from robotic collectives&#39; ability to perform multiple cooperative tasks (e.g., surveillance, damage assessments) efficiently across a large spatial area. Coalition formation algorithms can potentially facilitate collective robots&#39; assignment to appropriate task teams; however, most coalition formation algorithms were designed for smaller multiple robot systems (i.e., 2-50 robots). Collectives&#39; scale and domain-relevant constraints (i.e., distribution, near real-time, minimal communication) make coalition formation more challenging. This manuscript identifies the challenges inherent to designing coalition formation algorithms for very large collectives (e.g., 1000 robots). A survey of multiple robot coalition formation algorithms finds that most are unable to transfer directly to collectives, due to the identified system differences; however, auctions and hedonic games may be the most transferable. 
A simulation-based evaluation of three auction and hedonic game algorithms, applied to homogeneous and heterogeneous collectives, demonstrates that there are collective compositions for which no existing algorithm is viable; however, the experimental results and literature survey suggest paths forward. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.05590v1-abstract-full').style.display = 'none'; document.getElementById('2306.05590v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">46 pages, 9 figures, Swarm Intelligence (under review)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.00025">arXiv:2306.00025</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.00025">pdf</a>, <a href="https://arxiv.org/format/2306.00025">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3593013.3594075">10.1145/3593013.3594075 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Disentangling and Operationalizing AI Fairness at LinkedIn </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Qui%C3%B1onero-Candela%2C+J">Joaquin Quiñonero-Candela</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yuwen Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Hsu%2C+B">Brian Hsu</a>, <a href="/search/cs?searchtype=author&amp;query=Jain%2C+S">Sakshi Jain</a>, <a href="/search/cs?searchtype=author&amp;query=Ramos%2C+J">Jen Ramos</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jon Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Hallman%2C+R">Robert Hallman</a>, <a href="/search/cs?searchtype=author&amp;query=Basu%2C+K">Kinjal Basu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.00025v1-abstract-short" style="display: inline;"> Operationalizing AI fairness at LinkedIn&#39;s scale is challenging not only because there are multiple mutually incompatible definitions of fairness but also because determining what is fair depends on the specifics and context of the product where AI is deployed. Moreover, AI practitioners need clarity on what fairness expectations need to be addressed at the AI level.
In this paper, we present the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.00025v1-abstract-full').style.display = 'inline'; document.getElementById('2306.00025v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.00025v1-abstract-full" style="display: none;"> Operationalizing AI fairness at LinkedIn&#39;s scale is challenging not only because there are multiple mutually incompatible definitions of fairness but also because determining what is fair depends on the specifics and context of the product where AI is deployed. Moreover, AI practitioners need clarity on what fairness expectations need to be addressed at the AI level. In this paper, we present the evolving AI fairness framework used at LinkedIn to address these three challenges. The framework disentangles AI fairness by separating out equal treatment and equitable product expectations. Rather than imposing a trade-off between these two commonly opposing interpretations of fairness, the framework provides clear guidelines for operationalizing equal AI treatment complemented with a product equity strategy. This paper focuses on the equal AI treatment component of LinkedIn&#39;s AI fairness framework, shares the principles that support it, and illustrates their application through a case study. We hope this paper will encourage other big tech companies to join us in sharing their approach to operationalizing AI fairness at scale, so that together we can keep advancing this constantly evolving field. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.00025v1-abstract-full').style.display = 'none'; document.getElementById('2306.00025v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.14486">arXiv:2305.14486</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.14486">pdf</a>, <a href="https://arxiv.org/format/2305.14486">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Point2SSM: Learning Morphological Variations of Anatomies from Point Cloud </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jadie Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Elhabian%2C+S">Shireen Elhabian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.14486v2-abstract-short" style="display: inline;"> We present Point2SSM, a novel unsupervised learning approach for constructing correspondence-based statistical shape models (SSMs) directly from raw point clouds. SSM is crucial in clinical research, enabling population-level analysis of morphological variation in bones and organs. 
Traditional methods of SSM construction have limitations, including the requirement of noise-free surface meshes or b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.14486v2-abstract-full').style.display = 'inline'; document.getElementById('2305.14486v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.14486v2-abstract-full" style="display: none;"> We present Point2SSM, a novel unsupervised learning approach for constructing correspondence-based statistical shape models (SSMs) directly from raw point clouds. SSM is crucial in clinical research, enabling population-level analysis of morphological variation in bones and organs. Traditional methods of SSM construction have limitations, including the requirement of noise-free surface meshes or binary volumes, reliance on assumptions or templates, and prolonged inference times due to simultaneous optimization of the entire cohort. Point2SSM overcomes these barriers by providing a data-driven solution that infers SSMs directly from raw point clouds, reducing inference burdens and increasing applicability as point clouds are more easily acquired. While deep learning on 3D point clouds has seen success in unsupervised representation learning and shape correspondence, its application to anatomical SSM construction is largely unexplored. We conduct a benchmark of state-of-the-art point cloud deep networks on the SSM task, revealing their limited robustness to clinical challenges such as noisy, sparse, or incomplete input and limited training data. Point2SSM addresses these issues through an attention-based module, providing effective correspondence mappings from learned point features. Our results demonstrate that the proposed method significantly outperforms existing networks in terms of accurate surface sampling and correspondence, better capturing population-level statistics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.14486v2-abstract-full').style.display = 'none'; document.getElementById('2305.14486v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. 
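<p>One standard way to make "accurate surface sampling" measurable for point-cloud outputs is the Chamfer distance between the predicted points and reference surface samples. The NumPy sketch below states that metric; it is a common choice in this literature, assumed here rather than taken from the paper's exact protocol.</p> <pre><code>
# Chamfer distance: symmetric mean nearest-neighbor distance between
# (N, 3) and (M, 3) point sets.
import numpy as np

def chamfer(a, b):
    d = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)  # (N, M)
    return d.min(axis=1).mean() + d.min(axis=0).mean()

rng = np.random.default_rng(0)
sphere = rng.normal(size=(256, 3))
sphere /= np.linalg.norm(sphere, axis=1, keepdims=True)    # unit sphere
noisy = sphere + rng.normal(scale=0.01, size=sphere.shape)
print(round(chamfer(sphere, noisy), 4))   # small for near-identical clouds
</code></pre>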
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted as a Spotlight presentation at ICLR 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.05797">arXiv:2305.05797</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.05797">pdf</a>, <a href="https://arxiv.org/format/2305.05797">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Fully Bayesian VIB-DeepSSM </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jadie Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Elhabian%2C+S">Shireen Elhabian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.05797v2-abstract-short" style="display: inline;"> Statistical shape modeling (SSM) enables population-based quantitative analysis of anatomical shapes, informing clinical diagnosis. Deep learning approaches predict correspondence-based SSM directly from unsegmented 3D images but require calibrated uncertainty quantification, motivating Bayesian formulations. Variational information bottleneck DeepSSM (VIB-DeepSSM) is an effective, principled fram&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.05797v2-abstract-full').style.display = 'inline'; document.getElementById('2305.05797v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.05797v2-abstract-full" style="display: none;"> Statistical shape modeling (SSM) enables population-based quantitative analysis of anatomical shapes, informing clinical diagnosis. Deep learning approaches predict correspondence-based SSM directly from unsegmented 3D images but require calibrated uncertainty quantification, motivating Bayesian formulations. Variational information bottleneck DeepSSM (VIB-DeepSSM) is an effective, principled framework for predicting probabilistic shapes of anatomy from images with aleatoric uncertainty quantification. However, VIB is only half-Bayesian and lacks epistemic uncertainty inference. We derive a fully Bayesian VIB formulation and demonstrate the efficacy of two scalable implementation approaches: concrete dropout and batch ensemble. Additionally, we introduce a novel combination of the two that further enhances uncertainty calibration via multimodal marginalization. Experiments on synthetic shapes and left atrium data demonstrate that the fully Bayesian VIB network predicts SSM from images with improved uncertainty reasoning without sacrificing accuracy. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.05797v2-abstract-full').style.display = 'none'; document.getElementById('2305.05797v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to MICCAI 2023. 13 pages, 4 figures, appendix</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.05610">arXiv:2305.05610</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.05610">pdf</a>, <a href="https://arxiv.org/format/2305.05610">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Can point cloud networks learn statistical shape models of anatomies? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">Jadie Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Elhabian%2C+S">Shireen Elhabian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.05610v2-abstract-short" style="display: inline;"> Statistical Shape Modeling (SSM) is a valuable tool for investigating and quantifying anatomical variations within populations of anatomies. However, traditional correspondence-based SSM generation methods have a prohibitive inference process and require complete geometric proxies (e.g., high-resolution binary volumes or surface meshes) as input shapes to construct the SSM. Unordered 3D point clou&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.05610v2-abstract-full').style.display = 'inline'; document.getElementById('2305.05610v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.05610v2-abstract-full" style="display: none;"> Statistical Shape Modeling (SSM) is a valuable tool for investigating and quantifying anatomical variations within populations of anatomies. However, traditional correspondence-based SSM generation methods have a prohibitive inference process and require complete geometric proxies (e.g., high-resolution binary volumes or surface meshes) as input shapes to construct the SSM. Unordered 3D point cloud representations of shapes are more easily acquired from various medical imaging practices (e.g., thresholded images and surface scanning). Point cloud deep networks have recently achieved remarkable success in learning permutation-invariant features for different point cloud tasks (e.g., completion, semantic segmentation, classification). However, their application to learning SSM from point clouds is to-date unexplored. 
In this work, we demonstrate that existing point cloud encoder-decoder-based completion networks can provide an untapped potential for SSM, capturing population-level statistical representations of shapes while reducing the inference burden and relaxing the input requirement. We discuss the limitations of these techniques to the SSM application and suggest future improvements. Our work paves the way for further exploration of point cloud deep learning for SSM, a promising avenue for advancing shape analysis literature and broadening SSM to diverse use cases. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.05610v2-abstract-full').style.display = 'none'; document.getElementById('2305.05610v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to MICCAI 2023. 13 pages, 5 figures, appendix</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.05113">arXiv:2305.05113</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.05113">pdf</a>, <a href="https://arxiv.org/format/2305.05113">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> </div> <p class="title is-5 mathjax"> Object-Centric Alignments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liss%2C+L">Lukas Liss</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+N">Jan Niklas Adams</a>, <a href="/search/cs?searchtype=author&amp;query=van+der+Aalst%2C+W+M+P">Wil M. P. van der Aalst</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.05113v1-abstract-short" style="display: inline;"> Processes tend to interact with other processes and operate on various objects of different types. These objects can influence each other creating dependencies between sub-processes. Analyzing the conformance of such complex processes challenges traditional conformance-checking approaches because they assume a single-case identifier for a process. To create a single-case identifier one has to flat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.05113v1-abstract-full').style.display = 'inline'; document.getElementById('2305.05113v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.05113v1-abstract-full" style="display: none;"> Processes tend to interact with other processes and operate on various objects of different types. These objects can influence each other creating dependencies between sub-processes. Analyzing the conformance of such complex processes challenges traditional conformance-checking approaches because they assume a single-case identifier for a process. 
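<p>In the simplest flattened, single-case setting, an optimal alignment of one logged trace against one model run reduces to edit distance, with "log moves" and "model moves" each costing 1 and synchronous moves costing 0. The toy sketch below shows only that base case; the object-centric algorithm this paper contributes, which must respect inter-object dependencies, is substantially more involved.</p> <pre><code>
# Base-case alignment cost between a log trace and one model trace,
# computed as edit distance (mismatched synchronous moves forbidden,
# so a mismatch costs one log move plus one model move).
def alignment_cost(log_trace, model_trace):
    n, m = len(log_trace), len(model_trace)
    d = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        d[i][0] = i                       # i log moves
    for j in range(m + 1):
        d[0][j] = j                       # j model moves
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            sync = 0 if log_trace[i - 1] == model_trace[j - 1] else 2
            d[i][j] = min(d[i - 1][j] + 1,         # log move
                          d[i][j - 1] + 1,         # model move
                          d[i - 1][j - 1] + sync)  # synchronous move
    return d[n][m]

print(alignment_cost("acbd", "abcd"))   # 2: one log move + one model move
</code></pre>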
arXiv:2305.05113 (https://arxiv.org/abs/2305.05113) [pdf, other]
Subjects: cs.DB (Databases)
Title: Object-Centric Alignments
Authors: Lukas Liss, Jan Niklas Adams, Wil M. P. van der Aalst
Abstract: Processes tend to interact with other processes and operate on various objects of different types. These objects can influence each other, creating dependencies between sub-processes. Analyzing the conformance of such complex processes challenges traditional conformance-checking approaches because they assume a single-case identifier for a process. To create a single-case identifier, one has to flatten complex processes, which leads to information loss when separating the processes that interact on some objects. This paper introduces an alignment approach that operates directly on these object-centric processes. We introduce alignments that give behavior-based insights into how closely the event data generated by a process match the behavior specified by an object-centric Petri net. The contributions of this paper include a definition of object-centric alignments, an algorithm to compute them, a publicly available implementation, and a qualitative and quantitative evaluation. The qualitative evaluation shows that object-centric alignments give better insights into object-centric processes because they correctly consider inter-object dependencies. The quantitative evaluation shows that the run-time grows exponentially with the number of objects, the length of the process execution, and the cost of the alignment. These results motivate future research to improve the run-time and make object-centric alignments applicable to larger processes.
Submitted 8 May, 2023; originally announced May 2023.
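The information loss from flattening that motivates this paper can be seen on a toy object-centric log: projecting onto a single object type either drops events or duplicates them across objects. A small illustration (the log format is invented for this sketch, not the paper's implementation):

```python
# Toy object-centric event log: each event lists the objects it touches.
events = [
    {"act": "place order",  "order": ["o1"], "item": ["i1", "i2"]},
    {"act": "pick item",    "order": [],     "item": ["i1"]},
    {"act": "pick item",    "order": [],     "item": ["i2"]},
    {"act": "send invoice", "order": ["o1"], "item": []},
]

def flatten(log, obj_type):
    """Flatten on one object type: one trace per object; all other
    object references are discarded."""
    traces = {}
    for ev in log:
        for obj in ev[obj_type]:
            traces.setdefault(obj, []).append(ev["act"])
    return traces

# Flattening on "order" loses both pick events entirely:
print(flatten(events, "order"))  # {'o1': ['place order', 'send invoice']}
# Flattening on "item" duplicates "place order" and loses the invoice:
print(flatten(events, "item"))
```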
arXiv:2304.07051 (https://arxiv.org/abs/2304.07051) [pdf, other]
Subjects: cs.CV (Computer Vision and Pattern Recognition); cs.AI (Artificial Intelligence)
Title: The Second Monocular Depth Estimation Challenge
Authors: Jaime Spencer, C. Stella Qian, Michaela Trescakova, Chris Russell, Simon Hadfield, Erich W. Graf, Wendy J. Adams, Andrew J. Schofield, James Elder, Richard Bowden, Ali Anwar, Hao Chen, Xiaozhi Chen, Kai Cheng, Yuchao Dai, Huynh Thai Hoa, Sadat Hossain, Jianmian Huang, Mohan Jing, Bo Li, Chao Li, Baojun Li, Zhiwen Liu, Stefano Mattoccia, Siegfried Mercelis, et al. (18 additional authors not shown)
Abstract: This paper discusses the results of the second edition of the Monocular Depth Estimation Challenge (MDEC). This edition was open to methods using any form of supervision, including fully-supervised, self-supervised, multi-task, or proxy depth. The challenge was based around the SYNS-Patches dataset, which features a wide diversity of environments with high-quality dense ground truth, including complex natural environments, e.g., forests or fields, which are greatly underrepresented in current benchmarks. The challenge received eight unique submissions that outperformed the provided SotA baseline on any of the pointcloud- or image-based metrics. The top supervised submission improved relative F-Score by 27.62%, while the top self-supervised submission improved it by 16.61%. Supervised submissions generally leveraged large collections of datasets to improve data diversity, while self-supervised submissions instead updated the network architecture and pretrained backbones. These results represent significant progress in the field while highlighting avenues for future research, such as reducing interpolation artifacts at depth boundaries, improving self-supervised indoor performance, and improving overall natural image accuracy.
Submitted 26 April, 2023; v1 submitted 14 April, 2023; originally announced April 2023.
Comments: Published at CVPRW2023
arXiv:2303.17573 (https://arxiv.org/abs/2303.17573) [pdf, other]
Subjects: cs.LG (Machine Learning); cs.AI (Artificial Intelligence)
Title: Using AI to Measure Parkinson's Disease Severity at Home
Authors: Md Saiful Islam, Wasifur Rahman, Abdelrahman Abdelkader, Phillip T. Yang, Sangwu Lee, Jamie L. Adams, Ruth B. Schneider, E. Ray Dorsey, Ehsan Hoque
Abstract: We present an artificial intelligence system to remotely assess the motor performance of individuals with Parkinson's disease (PD). Participants performed a motor task (i.e., tapping fingers) in front of a webcam, and data from 250 global participants were rated by three expert neurologists following the Movement Disorder Society Unified Parkinson's Disease Rating Scale (MDS-UPDRS). The neurologists' ratings were highly reliable, with an intra-class correlation coefficient (ICC) of 0.88. We developed computer algorithms to obtain objective measurements that align with the MDS-UPDRS guideline and are strongly correlated with the neurologists' ratings. Our machine learning model trained on these measures outperformed an MDS-UPDRS certified rater, with a mean absolute error (MAE) of 0.59 compared to the rater's MAE of 0.79. However, the model performed slightly worse than the expert neurologists (0.53 MAE). The methodology can be replicated for similar motor tasks, providing the possibility of evaluating individuals with PD and other movement disorders remotely, objectively, and in areas with limited access to neurological care.
Submitted 17 August, 2023; v1 submitted 30 March, 2023; originally announced March 2023.
arXiv:2303.12937 (https://arxiv.org/abs/2303.12937) [pdf, other]
Subjects: cs.RO (Robotics); cs.NI (Networking and Internet Architecture)
Title: Wireless Network Demands of Data Products from Small Uncrewed Aerial Systems at Hurricane Ian
Authors: Thomas Manzini, Robin Murphy, David Merrick, Justin Adams
Abstract: Data collected at Hurricane Ian (2022) quantifies the demands that small uncrewed aerial systems (UAS), or drones, place on the network communication infrastructure and identifies gaps in the field. Drones have been used increasingly for disaster response since Hurricane Katrina (2005); however, getting the data from the drone to the appropriate decision makers throughout incident command in a timely fashion has been problematic. These delays have persisted even as countries such as the USA have made significant investments in wireless infrastructure, rapidly deployable nodes, and commercial satellite solutions. Hurricane Ian serves as a case study of the mismatch between communications needs and capabilities. In the first four days of the response, nine drone teams flew 34 missions under the direction of the State of Florida FL-UAS1, generating 636 GB of data. The teams had access to six different wireless communications networks but had to resort to physically transferring data to the nearest intact emergency operations center to make the data available to the relevant agencies. The analysis of the mismatch contributes a model of the drone data-to-decision workflow in a disaster and quantifies the wireless network communication requirements throughout the workflow in five factors. Four of the factors (availability, bandwidth, burstiness, and spatial distribution) were previously identified from analyses of Hurricanes Harvey (2017) and Michael (2018); this work adds upload rate as a fifth attribute. The analysis is expected to improve drone design and edge computing schemes as well as inform wireless communication research and development.
Submitted 4 September, 2023; v1 submitted 22 March, 2023; originally announced March 2023.
Comments: 6 pages, 8 figures
arXiv:2303.03486 (https://arxiv.org/abs/2303.03486) [pdf, other]
Subjects: cs.RO (Robotics)
Title: Sampling-based Exploration for Reinforcement Learning of Dexterous Manipulation
Authors: Gagan Khandate, Siqi Shang, Eric T. Chang, Tristan Luca Saidi, Yang Liu, Seth Matthew Dennis, Johnson Adams, Matei Ciocarlie
Abstract: In this paper, we present a novel method for achieving dexterous manipulation of complex objects while simultaneously securing the object without the use of passive support surfaces. We posit that a key difficulty in training such policies in a Reinforcement Learning framework is the difficulty of exploring the problem state space, as the accessible regions of this space form a complex structure along manifolds of a high-dimensional space. To address this challenge, we use two versions of the non-holonomic Rapidly-exploring Random Trees algorithm: one version is more general but requires explicit use of the environment's transition function, while the second version uses manipulation-specific kinematic constraints to attain better sample efficiency. In both cases, we use states found via sampling-based exploration to generate reset distributions that enable training control policies under full dynamic constraints via model-free Reinforcement Learning. We show that these policies are effective at manipulation problems of higher difficulty than previously shown and also transfer effectively to real robots. Videos of the real-hand demonstrations can be found on the project website: https://sbrl.cs.columbia.edu/
Submitted 23 May, 2023; v1 submitted 6 March, 2023; originally announced March 2023.
Comments: 10 pages, 7 figures, accepted at Robotics Science & Systems 2023
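The exploration recipe described above, growing a sampling-based tree through the state space and reusing the discovered states as episode reset states for model-free RL, can be sketched generically. A toy 2-D version (the domain, step size, and the way resets are consumed are placeholder assumptions, not the authors' code):

```python
import random, math

def rrt_explore(start, n_iters=2000, step_size=0.05):
    """Grow a simple RRT over the 2-D unit square; return the visited states."""
    tree = [start]
    for _ in range(n_iters):
        target = (random.random(), random.random())           # random sample
        near = min(tree, key=lambda s: math.dist(s, target))  # nearest node
        d = math.dist(near, target)
        if d == 0:
            continue
        # extend one small step from the nearest node toward the sample
        new = tuple(n + step_size * (t - n) / d for n, t in zip(near, target))
        tree.append(new)
    return tree

# States found by tree search become the reset distribution for RL training:
reset_states = rrt_explore(start=(0.5, 0.5))
episode_start = random.choice(reset_states)  # e.g., env.reset(state=episode_start)
print(len(reset_states), episode_start)
```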
arXiv:2302.12378 (https://arxiv.org/abs/2302.12378) [pdf, other]
Subjects: cs.LG (Machine Learning); astro-ph.CO (Cosmology and Nongalactic Astrophysics); astro-ph.IM (Instrumentation and Methods for Astrophysics)
Title: Cosmic Microwave Background Recovery: A Graph-Based Bayesian Convolutional Network Approach
Authors: Jadie Adams, Steven Lu, Krzysztof M. Gorski, Graca Rocha, Kiri L. Wagstaff
Abstract: The cosmic microwave background (CMB) is a significant source of knowledge about the origin and evolution of our universe. However, observations of the CMB are contaminated by foreground emissions, obscuring the CMB signal and reducing its efficacy in constraining cosmological parameters. We employ deep learning as a data-driven approach to CMB cleaning from multi-frequency full-sky maps. In particular, we develop a graph-based Bayesian convolutional neural network based on the U-Net architecture that predicts cleaned CMB with pixel-wise uncertainty estimates. We demonstrate the potential of this technique on realistic simulated data based on the Planck mission. We show that our model accurately recovers the cleaned CMB sky map and resulting angular power spectrum while identifying regions of uncertainty. Finally, we discuss the current challenges and the path forward for deploying our model for CMB recovery on real observations.
Submitted 23 February, 2023; originally announced February 2023.
Comments: Published at the Thirty-fifth Annual Conference on Innovative Applications of Artificial Intelligence (IAAI-23). 7 pages, 6 figures
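The abstract does not spell out how the network's pixel-wise uncertainty is obtained, so the sketch below uses one common stand-in, Monte Carlo sampling with dropout left on at test time, purely to illustrate how per-pixel uncertainty maps arise from a stochastic model:

```python
import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((16, 16)) / 4     # toy "cleaning" layer weights

def stochastic_clean(noisy_map, p_drop=0.2):
    """One stochastic forward pass: dropout stays active at test time."""
    mask = rng.random(noisy_map.shape) > p_drop
    return (noisy_map * mask) @ W

noisy = rng.standard_normal((16, 16))     # stand-in for a contaminated sky patch
samples = np.stack([stochastic_clean(noisy) for _ in range(100)])
cleaned = samples.mean(axis=0)            # point estimate of the cleaned map
pixel_sigma = samples.std(axis=0)         # pixel-wise uncertainty estimate
print(cleaned.shape, float(pixel_sigma.mean()))
```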
arXiv:2212.11232 (https://arxiv.org/abs/2212.11232) [pdf, ps, other]
Subjects: cs.HC (Human-Computer Interaction); cs.CY (Computers and Society)
Title: One Artist's Personal Reflections on Methods and Ethics of Creating Mixed Media Artificial Intelligence Art
Authors: Jane Adams
Abstract: I intend to make a scientific contribution of my subjective experience as a single unit of self-described "artist" leveraging artificial intelligence as an assistive visual creation tool, in the hopes that it may provide some inspiration or deeper meaning for fellow artists and computer scientists in this medium. First, I will provide some background on my personal history thus far as an artist. Neither artist nor scientist can exist in a vacuum, so I will then provide some (albeit a non-exhaustive list of) related work that has helped me contextualize my own work and thinking in this area. I often consider my methods in the creative process chronologically, so I have divided that section according to the loose structure of my artistic workflow. These foundations provide a fertile grounding for discussion around topics of subject matter, reception, community, and ethics. I then conclude with some ideas for future work in the realms of theory of authorship, explainability tooling, and research framing.
Submitted 30 November, 2022; originally announced December 2022.
Comments: 7 pages, AAAI Workshop on Creative AI Across Modalities
ACM Class: K.4.0; I.2.m
arXiv:2209.03042 (https://arxiv.org/abs/2209.03042) [pdf, other]
Subjects: hep-ex (High Energy Physics - Experiment); astro-ph.IM (Instrumentation and Methods for Astrophysics); cs.LG (Machine Learning); physics.data-an (Data Analysis, Statistics and Probability); physics.ins-det (Instrumentation and Detectors)
DOI: 10.1088/1748-0221/17/11/P11003 (https://doi.org/10.1088/1748-0221/17/11/P11003)
Title: Graph Neural Networks for Low-Energy Event Classification & Reconstruction in IceCube
Authors: R. Abbasi, M. Ackermann, J. Adams, N. Aggarwal, J. A. Aguilar, M. Ahlers, M. Ahrens, J. M. Alameddine, A. A. Alves Jr., N. M. Amin, K. Andeen, T. Anderson, G. Anton, C. Argüelles, Y. Ashida, S. Athanasiadou, S. Axani, X. Bai, A. Balagopal V., M. Baricevic, S. W. Barwick, V. Basu, R. Bay, J. J. Beatty, K.-H. Becker, et al. (359 additional authors not shown)
Abstract: IceCube, a cubic-kilometer array of optical sensors built to detect atmospheric and astrophysical neutrinos between 1 GeV and 1 PeV, is deployed 1.45 km to 2.45 km below the surface of the ice sheet at the South Pole. The classification and reconstruction of events from the in-ice detectors play a central role in the analysis of IceCube data. Reconstructing and classifying events is a challenge due to the irregular detector geometry, inhomogeneous scattering and absorption of light in the ice and, below 100 GeV, the relatively low number of signal photons produced per event. To address this challenge, it is possible to represent IceCube events as point cloud graphs and use a Graph Neural Network (GNN) as the classification and reconstruction method. The GNN is capable of distinguishing neutrino events from cosmic-ray backgrounds, classifying different neutrino event types, and reconstructing the deposited energy, direction, and interaction vertex. Based on simulation, we provide a comparison in the 1-100 GeV energy range to the current state-of-the-art maximum likelihood techniques used in current IceCube analyses, including the effects of known systematic uncertainties. For neutrino event classification, the GNN increases the signal efficiency by 18% at a fixed false positive rate (FPR) compared to current IceCube methods. Alternatively, the GNN offers a reduction of the FPR by over a factor of 8 (to below half a percent) at a fixed signal efficiency. For the reconstruction of energy, direction, and interaction vertex, the resolution improves by an average of 13%-20% compared to current maximum likelihood techniques in the energy range of 1-30 GeV. The GNN, when run on a GPU, is capable of processing IceCube events at nearly double the median IceCube trigger rate of 2.7 kHz, which opens the possibility of using low-energy neutrinos in online searches for transient events.
Submitted 11 October, 2022; v1 submitted 7 September, 2022; originally announced September 2022.
Comments: Prepared for submission to JINST
arXiv:2209.02736 (https://arxiv.org/abs/2209.02736) [pdf, other]
Subjects: cs.LG (Machine Learning); cs.CV (Computer Vision and Pattern Recognition); eess.IV (Image and Video Processing)
Title: Spatiotemporal Cardiac Statistical Shape Modeling: A Data-Driven Approach
Authors: Jadie Adams, Nawazish Khan, Alan Morris, Shireen Elhabian
Abstract: Clinical investigations of anatomy's structural changes over time could greatly benefit from population-level quantification of shape, or spatiotemporal statistical shape modeling (SSM). Such a tool enables characterizing patient organ cycles or disease progression in relation to a cohort of interest. Constructing shape models requires establishing a quantitative shape representation (e.g., corresponding landmarks). Particle-based shape modeling (PSM) is a data-driven SSM approach that captures population-level shape variations by optimizing landmark placement. However, it assumes cross-sectional study designs and hence has limited statistical power in representing shape changes over time. Existing methods for modeling spatiotemporal or longitudinal shape changes require predefined shape atlases and pre-built shape models that are typically constructed cross-sectionally. This paper proposes a data-driven approach inspired by the PSM method to learn population-level spatiotemporal shape changes directly from shape data. We introduce a novel SSM optimization scheme that produces landmarks that are in correspondence both across the population (inter-subject) and across time series (intra-subject). We apply the proposed method to 4D cardiac data from atrial-fibrillation patients and demonstrate its efficacy in representing the dynamic change of the left atrium. Furthermore, we show that our method outperforms an image-based approach for spatiotemporal SSM with respect to a generative time-series model, the Linear Dynamical System (LDS). An LDS fit using a spatiotemporal shape model optimized via our approach provides better generalization and specificity, indicating that it accurately captures the underlying time dependency.
Submitted 6 September, 2022; originally announced September 2022.
Comments: Accepted in the Statistical Atlases and Computational Modeling of the Heart (STACOM) workshop, part of the 25th International Conference on Medical Image Computing and Computer Assisted Intervention, MICCAI 2022. To be published in a Lecture Notes in Computer Science proceedings published by Springer
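The Linear Dynamical System used for evaluation here models the shape descriptors as x_{t+1} = A x_t + noise; fitting A by least squares and scoring held-out one-step predictions conveys the flavour of the generalization test. A toy sketch (a generic LDS recipe, not the paper's exact protocol):

```python
import numpy as np

rng = np.random.default_rng(0)
A_true = np.array([[0.9, 0.1], [-0.1, 0.9]])

# Simulate a time series of toy 2-D shape descriptors: x_{t+1} = A x_t + noise
xs = [rng.standard_normal(2)]
for _ in range(99):
    xs.append(A_true @ xs[-1] + 0.01 * rng.standard_normal(2))
X = np.array(xs)

# Least-squares fit of the transition matrix on the first 80 steps
A_hat = np.linalg.lstsq(X[:79], X[1:80], rcond=None)[0].T
# Generalization: one-step prediction error on the held-out steps
err = np.linalg.norm(X[81:] - X[80:-1] @ A_hat.T, axis=1).mean()
print(np.round(A_hat, 2), float(err))
```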
arXiv:2209.01219 (https://arxiv.org/abs/2209.01219) [pdf, other]
Subjects: cs.LG (Machine Learning); cs.AI (Artificial Intelligence); cs.DB (Databases)
Title: A Framework for Extracting and Encoding Features from Object-Centric Event Data
Authors: Jan Niklas Adams, Gyunam Park, Sergej Levich, Daniel Schuster, Wil M. P. van der Aalst
Abstract: Traditional process mining techniques take event data as input where each event is associated with exactly one object. An object represents the instantiation of a process. Object-centric event data contain events associated with multiple objects, expressing the interaction of multiple processes. Since traditional process mining techniques assume events associated with exactly one object, they cannot be applied to object-centric event data. To use traditional process mining techniques, object-centric event data are flattened by removing all object references but one. The flattening process is lossy, leading to inaccurate features extracted from flattened data; furthermore, the graph-like structure of the object-centric event data is lost. In this paper, we introduce a general framework for extracting and encoding features from object-centric event data. We calculate features natively on the object-centric event data, leading to accurate measures. Furthermore, we provide three encodings for these features: tabular, sequential, and graph-based. While tabular and sequential encodings have been heavily used in process mining, the graph-based encoding is a new technique preserving the structure of the object-centric event data. We provide six use cases: a visualization and a prediction use case for each of the three encodings. We use explainable AI in the prediction use cases to show the utility of both the object-centric features and the structure of the sequential and graph-based encodings for a predictive model.
Submitted 2 September, 2022; originally announced September 2022.
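The graph-based encoding highlighted above preserves the event-to-event structure induced by shared objects, in contrast to a flattened sequence. A minimal construction under assumed conventions (events as nodes, an edge between consecutive events of each object; the toy log is invented):

```python
import networkx as nx

# Toy object-centric event log: (event id, activity, objects touched)
log = [
    (1, "place order", {"o1", "i1", "i2"}),
    (2, "pick item",   {"i1"}),
    (3, "pick item",   {"i2"}),
    (4, "ship",        {"o1", "i1", "i2"}),
]

G = nx.DiGraph()
for eid, act, _ in log:
    G.add_node(eid, activity=act)
last_event = {}                      # most recent event seen per object
for eid, _, objs in log:
    for obj in objs:
        if obj in last_event:        # edge: consecutive events of one object
            G.add_edge(last_event[obj], eid)
        last_event[obj] = eid
print(sorted(G.edges()))  # [(1, 2), (1, 3), (1, 4), (2, 4), (3, 4)]: a graph, not a sequence
```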
arXiv:2208.05080 (https://arxiv.org/abs/2208.05080) [pdf, other]
Subjects: physics.soc-ph (Physics and Society); cs.SI (Social and Information Networks)
DOI: 10.1371/journal.pone.0275283 (https://doi.org/10.1371/journal.pone.0275283)
Title: Mathematical measures of societal polarisation
Authors: Johnathan A. Adams, Gentry White, Robyn P. Araujo
Abstract: In opinion dynamics, as in general usage, polarisation is subjective. To understand polarisation, we need to develop more precise methods to measure the agreement in society. This paper presents four mathematical measures of polarisation derived from graph and network representations of societies and from information-theoretic divergences or distance metrics. Two of the methods, min-max flow and spectral radius, rely on graph theory and define polarisation in terms of the structural characteristics of networks. The other two methods represent opinions as probability density functions and use the Kullback-Leibler divergence and the Hellinger distance as polarisation measures. We present a series of opinion dynamics simulations from two common models to test the effectiveness of the methods. Results show that the four measures provide insight into different aspects of polarisation and allow real-time monitoring of social networks for indicators of polarisation. Three of the measures (the spectral radius, the Kullback-Leibler divergence, and the Hellinger distance) smoothly delineated between different amounts of polarisation, i.e., how many clusters there were in the simulation, while also measuring with more granularity how close simulations were to consensus. Min-max flow failed to achieve such nuance.
Submitted 9 August, 2022; originally announced August 2022.
Comments: 28 pages, 10 figures, 1 support figure
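The two distribution-based measures are standard quantities: for discrete opinion distributions P and Q, KL(P||Q) = sum_i p_i log(p_i / q_i), and the Hellinger distance is H(P, Q) = (1/sqrt(2)) * ||sqrt(P) - sqrt(Q)||_2. A small sketch comparing invented consensus-like and polarised opinion histograms against a uniform reference (whether the paper uses this exact reference construction is not stated in the abstract):

```python
import numpy as np

def kl(p, q):
    """Kullback-Leibler divergence between discrete distributions."""
    return float(np.sum(p * np.log(p / q)))

def hellinger(p, q):
    """Hellinger distance: (1/sqrt(2)) * ||sqrt(p) - sqrt(q)||_2."""
    return float(np.linalg.norm(np.sqrt(p) - np.sqrt(q)) / np.sqrt(2))

consensus = np.array([0.05, 0.15, 0.60, 0.15, 0.05])  # opinions clustered
polarised = np.array([0.45, 0.05, 0.01, 0.04, 0.45])  # two opposing camps
uniform = np.full(5, 0.2)                             # common reference

# The polarised histogram diverges more from the shared reference:
print(kl(consensus, uniform), hellinger(consensus, uniform))
print(kl(polarised, uniform), hellinger(polarised, uniform))
```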
arXiv:2208.03235 (https://arxiv.org/abs/2208.03235) [pdf, other]
Subjects: cs.DB (Databases); cs.AI (Artificial Intelligence)
Title: Defining Cases and Variants for Object-Centric Event Data
Authors: Jan Niklas Adams, Daniel Schuster, Seth Schmitz, Günther Schuh, Wil M. P. van der Aalst
Abstract: The execution of processes leaves traces of event data in information systems. These event data can be analyzed through process mining techniques. For traditional process mining techniques, one has to associate each event with exactly one object, e.g., the company's customer. Events related to one object form an event sequence called a case, which describes an end-to-end run through a process. The cases contained in event data can be used to discover a process model, detect frequent bottlenecks, or learn predictive models. However, events encountered in real-life information systems, e.g., ERP systems, can often be associated with multiple objects. The traditional sequential case concept falls short for these object-centric event data, as they exhibit a graph structure. One might force object-centric event data into the traditional case concept by flattening them; however, flattening manipulates the data and removes information. Therefore, a concept analogous to the case concept of traditional event logs is necessary to enable the application of different process mining tasks on object-centric event data. In this paper, we introduce the case concept for object-centric process mining: process executions. These are graph-based generalizations of cases as considered in traditional process mining. Furthermore, we provide techniques to extract process executions. Based on these executions, we determine equivalent process behavior with respect to an attribute using graph isomorphism. Equivalent process executions with respect to the events' activities are object-centric variants, i.e., a generalization of variants in traditional process mining. We provide a visualization technique for object-centric variants. The contribution's scalability and efficiency are extensively evaluated, and we provide a case study showing the most frequent object-centric variants of a real-life event log.
Submitted 5 August, 2022; originally announced August 2022.
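Grouping process executions into variants via "equivalent process behavior with respect to an attribute using graph isomorphism", as described above, can be imitated with networkx's attribute-aware isomorphism test. A toy sketch (the execution graphs are invented; the paper's extraction techniques are not reproduced):

```python
import networkx as nx
from networkx.algorithms.isomorphism import categorical_node_match

def execution(nodes, edges):
    """Build a process-execution graph with an 'activity' node label."""
    g = nx.DiGraph()
    for node, act in nodes:
        g.add_node(node, activity=act)
    g.add_edges_from(edges)
    return g

e1 = execution([(1, "a"), (2, "b"), (3, "c")], [(1, 2), (1, 3)])
e2 = execution([(9, "a"), (8, "b"), (7, "c")], [(9, 8), (9, 7)])  # same shape as e1
e3 = execution([(1, "a"), (2, "b"), (3, "c")], [(1, 2), (2, 3)])  # a chain instead

variants = []  # each variant keeps one representative execution
for ex in (e1, e2, e3):
    for rep in variants:
        if nx.is_isomorphic(rep, ex, node_match=categorical_node_match("activity", None)):
            break
    else:
        variants.append(ex)
print(len(variants))  # 2: e1 and e2 collapse into one object-centric variant
```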
arXiv:2205.06862 (https://arxiv.org/abs/2205.06862) [pdf, other]
Subjects: cs.CV (Computer Vision and Pattern Recognition); cs.LG (Machine Learning)
Title: From Images to Probabilistic Anatomical Shapes: A Deep Variational Bottleneck Approach
Authors: Jadie Adams, Shireen Elhabian
Abstract: Statistical shape modeling (SSM) directly from 3D medical images is an underutilized tool for detecting pathology, diagnosing disease, and conducting population-level morphology analysis. Deep learning frameworks have increased the feasibility of adopting SSM in medical practice by reducing the expert-driven manual and computational overhead in traditional SSM workflows. However, translating such frameworks to clinical practice requires calibrated uncertainty measures, as neural networks can produce over-confident predictions that cannot be trusted in sensitive clinical decision-making. Existing techniques for predicting shape with aleatoric (data-dependent) uncertainty utilize a principal component analysis (PCA) based shape representation computed in isolation from the model training. This constraint restricts the learning task to solely estimating pre-defined shape descriptors from 3D images and imposes a linear relationship between this shape representation and the output (i.e., shape) space. In this paper, we propose a principled framework based on the variational information bottleneck theory to relax these assumptions while predicting probabilistic shapes of anatomy directly from images without supervised encoding of shape descriptors. Here, the latent representation is learned in the context of the learning task, resulting in a more scalable, flexible model that better captures data non-linearity. Additionally, this model is self-regularized and generalizes better given limited training data. Our experiments demonstrate that the proposed method provides improved accuracy and better calibrated aleatoric uncertainty estimates than state-of-the-art methods.
Submitted 13 May, 2022; originally announced May 2022.
Comments: Provisionally accepted to MICCAI 2022 on May 4, 2022
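The variational information bottleneck objective referenced above trades a reconstruction term against a KL term that compresses the latent code; for a diagonal Gaussian encoder and a unit Gaussian prior, the KL has a closed form. A minimal, architecture-free sketch of the loss (the weighting beta and the shapes are illustrative assumptions, not the paper's settings):

```python
import numpy as np

def vib_loss(y_true, y_pred, mu, log_var, beta=0.01):
    """IB Lagrangian: reconstruction error + beta * KL(q(z|x) || N(0, I)).

    For a diagonal Gaussian posterior vs. the unit Gaussian prior, the KL
    has the closed form 0.5 * sum(mu^2 + sigma^2 - log sigma^2 - 1).
    """
    recon = np.mean((y_true - y_pred) ** 2)
    kl = 0.5 * np.sum(mu**2 + np.exp(log_var) - log_var - 1.0)
    return recon + beta * kl

rng = np.random.default_rng(0)
mu, log_var = rng.standard_normal(8) * 0.1, np.zeros(8)
z = mu + np.exp(0.5 * log_var) * rng.standard_normal(8)  # reparameterised sample
y_true, y_pred = rng.standard_normal(32), rng.standard_normal(32)
print(float(vib_loss(y_true, y_pred, mu, log_var)))
```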
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Provisionally accepted to MICCAI 2022 on May 4, 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.10662">arXiv:2204.10662</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2204.10662">pdf</a>, <a href="https://arxiv.org/format/2204.10662">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-031-17995-2_20">10.1007/978-3-031-17995-2_20 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> OPerA: Object-Centric Performance Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Park%2C+G">Gyunam Park</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+N">Jan Niklas Adams</a>, <a href="/search/cs?searchtype=author&amp;query=van+der+Aalst%2C+W+M+P">Wil. M. P. van der Aalst</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.10662v2-abstract-short" style="display: inline;"> Performance analysis in process mining aims to provide insights on the performance of a business process by using a process model as a formal representation of the process. Such insights are reliably interpreted by process analysts in the context of a model with formal semantics. Existing techniques for performance analysis assume that a single case notion exists in a business process (e.g., a pat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.10662v2-abstract-full').style.display = 'inline'; document.getElementById('2204.10662v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.10662v2-abstract-full" style="display: none;"> Performance analysis in process mining aims to provide insights on the performance of a business process by using a process model as a formal representation of the process. Such insights are reliably interpreted by process analysts in the context of a model with formal semantics. Existing techniques for performance analysis assume that a single case notion exists in a business process (e.g., a patient in healthcare process). However, in reality, different objects might interact (e.g., order, item, delivery, and invoice in an O2C process). In such a setting, traditional techniques may yield misleading or even incorrect insights on performance metrics such as waiting time. More importantly, by considering the interaction between objects, we can define object-centric performance metrics such as synchronization time, pooling time, and lagging time. In this work, we propose a novel approach to performance analysis considering multiple case notions by using object-centric Petri nets as formal representations of business processes. 
The proposed approach correctly computes existing performance metrics, while supporting the derivation of newly-introduced object-centric performance metrics. We have implemented the approach as a web application and conducted a case study based on a real-life loan application process.
Submitted 27 June, 2022; v1 submitted 22 April, 2022; originally announced April 2022.
Journal ref: LNCS 13607 (2022) 281-292
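For a rough sense of the object-centric metrics named in this abstract, the sketch below computes a synchronization-style time as the gap between the earliest and latest moments at which the objects required by an event become available. This reading, and the helper and variable names, are simplifications for illustration; the paper defines the metrics formally on object-centric Petri nets.

from datetime import datetime

def synchronization_time(object_ready_times: dict) -> float:
    # Seconds between the first and the last required object becoming
    # available for one event (illustrative definition, not the paper's).
    times = sorted(object_ready_times.values())
    return (times[-1] - times[0]).total_seconds()

# Hypothetical O2C event waiting for one order and two items.
ready = {
    "order-1": datetime(2022, 4, 1, 9, 0),
    "item-7": datetime(2022, 4, 1, 9, 45),
    "item-8": datetime(2022, 4, 1, 10, 30),
}
print(synchronization_time(ready) / 60)  # 90.0 minutes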
arXiv:2110.07152 [pdf, other] (cs.CV, cs.LG)
DeepSSM: A Blueprint for Image-to-Shape Deep Learning Models
Authors: Riddhish Bhalodia, Shireen Elhabian, Jadie Adams, Wenzheng Tao, Ladislav Kavan, Ross Whitaker
Abstract: Statistical shape modeling (SSM) characterizes anatomical variations in a population of shapes generated from medical images. SSM requires consistent shape representation across samples in a shape cohort. Establishing this representation entails a processing pipeline that includes anatomy segmentation, re-sampling, registration, and non-linear optimization. These shape representations are then used to extract low-dimensional shape descriptors that facilitate subsequent analyses in different applications. However, the current process of obtaining these shape descriptors from imaging data relies on human and computational resources, requiring domain expertise for segmenting anatomies of interest. Moreover, this same taxing pipeline needs to be repeated to infer shape descriptors for new image data using a pre-trained/existing shape model. Here, we propose DeepSSM, a deep learning-based framework for learning the functional mapping from images to low-dimensional shape descriptors and their associated shape representations, thereby inferring statistical representation of anatomy directly from 3D images. Once trained using an existing shape model, DeepSSM circumvents the heavy and manual pre-processing and segmentation and significantly improves the computational time, making it a viable solution for fully end-to-end SSM applications. In addition, we introduce a model-based data-augmentation strategy to address data scarcity. Finally, this paper presents and analyzes two different architectural variants of DeepSSM with different loss functions using three medical datasets and their downstream clinical application. Experiments showcase that DeepSSM performs comparably to or better than state-of-the-art SSM both quantitatively and on application-driven downstream tasks. Therefore, DeepSSM aims to provide a comprehensive blueprint for deep learning-based image-to-shape models.
Submitted 16 March, 2022; v1 submitted 14 October, 2021; originally announced October 2021.
Comments: pre-print
arXiv:2110.05375 [pdf, other] (cs.DB, cs.AI)
Precision and Fitness in Object-Centric Process Mining
Authors: Jan Niklas Adams, Wil M. P. van der Aalst
Abstract: Traditional process mining considers only one single case notion and discovers and analyzes models based on this. However, a single case notion is often not a realistic assumption in practice. Multiple case notions might interact and influence each other in a process. Object-centric process mining introduces the techniques and concepts to handle multiple case notions. So far, such event logs have been standardized and novel process model discovery techniques were proposed. However, notions for evaluating the quality of a model are missing. These are necessary to enable future research on improving object-centric discovery and providing an objective evaluation of model quality. In this paper, we introduce a notion for the precision and fitness of an object-centric Petri net with respect to an object-centric event log. We give a formal definition and accompany this with an example. Furthermore, we provide an algorithm to calculate these quality measures. We discuss our precision and fitness notion based on an event log with different models. Our precision and fitness notions are an appropriate way to generalize quality measures to the object-centric setting since we are able to consider multiple case notions, their dependencies and their interactions.
Submitted 6 October, 2021; originally announced October 2021.
arXiv:2109.12323 [pdf] (cs.LG)
Deep Learning-Based Detection of the Acute Respiratory Distress Syndrome: What Are the Models Learning?
Authors: Gregory B. Rehm, Chao Wang, Irene Cortes-Puch, Chen-Nee Chuah, Jason Adams
Abstract: The acute respiratory distress syndrome (ARDS) is a severe form of hypoxemic respiratory failure with in-hospital mortality of 35-46%. High mortality is thought to be related in part to challenges in making a prompt diagnosis, which may in turn delay implementation of evidence-based therapies. A deep neural network (DNN) algorithm utilizing unbiased ventilator waveform data (VWD) may help to improve screening for ARDS. We first show that a convolutional neural network-based ARDS detection model can outperform prior work with random forest models in AUC (0.95+/-0.019 vs. 0.88+/-0.064), accuracy (0.84+/-0.026 vs. 0.80+/-0.078), and specificity (0.81+/-0.06 vs. 0.71+/-0.089). Frequency ablation studies imply that our model can learn features from low-frequency domains typically used for expert feature engineering, and from high-frequency information that may be difficult to manually featurize. Further experiments suggest that subtle, high-frequency components of physiologic signals may explain the superior performance of DL models over traditional ML when using physiologic waveform data. Our observations may enable improved interpretability of DL-based physiologic models and may improve the understanding of how high-frequency information in physiologic data impacts the performance of our DL model.
Submitted 25 September, 2021; originally announced September 2021.
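A frequency ablation of the kind this abstract describes can be sketched as: low-pass the waveform, re-score the model, and compare. The sampling rate, cutoff, and model.predict interface below are assumptions for illustration, not details from the paper.

import numpy as np
from scipy.signal import butter, sosfiltfilt
from sklearn.metrics import roc_auc_score

def low_pass(waveform, cutoff_hz, fs=50.0, order=4):
    # Remove spectral content above cutoff_hz from one breath's waveform.
    sos = butter(order, cutoff_hz, btype="low", fs=fs, output="sos")
    return sosfiltfilt(sos, waveform)

def ablation_auc(model, breaths, labels, cutoff_hz):
    # Score a (hypothetical) trained model on low-passed inputs; if AUC
    # holds up, the information below cutoff_hz is sufficient for detection.
    filtered = np.stack([low_pass(b, cutoff_hz) for b in breaths])
    return roc_auc_score(labels, model.predict(filtered))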
arXiv:2109.10224 [pdf] (q-bio.QM, cs.LG)
Clinical Validation of Single-Chamber Model-Based Algorithms Used to Estimate Respiratory Compliance
Authors: Gregory Rehm, Jimmy Nguyen, Chelsea Gilbeau, Marc T Bomactao, Chen-Nee Chuah, Jason Adams
Abstract: Non-invasive estimation of respiratory physiology using computational algorithms promises to be a valuable technique for future clinicians to detect detrimental changes in patient pathophysiology. However, few clinical algorithms used to non-invasively analyze lung physiology have undergone rigorous validation in a clinical setting, and are often validated either using mechanical devices, or with small clinical validation datasets using 2-8 patients. This work aims to improve this situation by first establishing an open, clinically validated dataset comprising data from both mechanical lungs and nearly 40,000 breaths from 18 intubated patients. Next, we use this data to evaluate 15 different algorithms that use the "single chamber" model of estimating respiratory compliance. We evaluate these algorithms under varying clinical scenarios patients typically experience during hospitalization. In particular, we explore algorithm performance under four different types of patient-ventilator asynchrony. We also analyze algorithms under varying ventilation modes to benchmark algorithm performance and to determine if ventilation mode has any impact on the algorithm. Our approach yields several advances by 1) showing which specific algorithms work best clinically under varying mode and asynchrony scenarios, 2) developing a simple mathematical method to reduce variance in algorithmic results, and 3) presenting additional insights about single-chamber model algorithms. We hope that our paper, approach, dataset, and software framework can thus be used by future researchers to improve their work and allow future integration of "single chamber" algorithms into clinical practice.
Submitted 19 September, 2021; originally announced September 2021.
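For context, the "single chamber" (single-compartment) model referenced in this abstract relates airway pressure to volume and flow as P(t) = V(t)/C + R*flow(t) + PEEP, so compliance C can be recovered by a least-squares fit over a breath. The sketch below is one textbook instantiation under that assumption, not necessarily any of the 15 algorithms evaluated in the paper.

import numpy as np

def fit_compliance(pressure, flow, volume):
    # Least-squares fit of P = V/C + R*flow + P0 over one breath.
    # Returns compliance C (mL/cmH2O) and resistance R (cmH2O/(L/s)).
    A = np.column_stack([volume, flow, np.ones_like(volume)])
    (inv_c, r, p0), *_ = np.linalg.lstsq(A, pressure, rcond=None)
    return 1.0 / inv_c, r

# Synthetic breath with C = 50 mL/cmH2O, R = 10 cmH2O/(L/s), PEEP = 5.
t = np.linspace(0, 1, 50)
flow = np.where(t < 0.5, 0.5, 0.0)               # L/s: inspiration then hold
volume = np.cumsum(flow) * (t[1] - t[0]) * 1000  # mL
pressure = volume / 50.0 + 10 * flow + 5.0
c_hat, r_hat = fit_compliance(pressure, flow, volume)
print(round(c_hat, 1), round(r_hat, 1))  # ~50.0, ~10.0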
arXiv:2106.15017 [pdf, other] (cs.LG, cs.CY)
Early Mobility Recognition for Intensive Care Unit Patients Using Accelerometers
Authors: Rex Liu, Sarina A Fazio, Huanle Zhang, Albara Ah Ramli, Xin Liu, Jason Yeates Adams
Abstract: With the development of the Internet of Things (IoT) and Artificial Intelligence (AI) technologies, human activity recognition has enabled various applications, such as smart homes and assisted living. In this paper, we target a new healthcare application of human activity recognition, early mobility recognition for Intensive Care Unit (ICU) patients. Early mobility is essential for ICU patients who suffer from long-term immobilization. Our system includes accelerometer-based data collection from ICU patients and an AI model to recognize patients' early mobility. To improve the model accuracy and stability, we identify features that are insensitive to sensor orientations and propose a segment voting process that leverages a majority voting strategy to recognize each segment's activity. Our results show that our system improves model accuracy from 77.78% to 81.86% and reduces the model instability (standard deviation) from 16.69% to 6.92%, compared to the same AI model without our feature engineering and segment voting process.
Submitted 28 June, 2021; originally announced June 2021.
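The segment voting process described in this abstract reduces to a few lines: extract orientation-insensitive features per fixed-size window, classify each window, and let the majority label win. The magnitude-based features and the clf object below are illustrative stand-ins for the paper's feature set and trained model.

import numpy as np
from collections import Counter

def orientation_free_features(segment):
    # The magnitude of the 3-axis signal is invariant to sensor rotation.
    mag = np.linalg.norm(segment, axis=1)
    return [mag.mean(), mag.std(), mag.min(), mag.max()]

def classify_with_segment_voting(stream, clf, win=128):
    # Split the stream into fixed-size segments, predict each one, and
    # return the majority-vote activity label for the whole recording.
    segments = [stream[i:i + win] for i in range(0, len(stream) - win + 1, win)]
    votes = [clf.predict([orientation_free_features(s)])[0] for s in segments]
    return Counter(votes).most_common(1)[0][0]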
arXiv:2106.10281 [pdf, other] (cs.SI, cs.CY, physics.soc-ph)
Say Their Names: Resurgence in the collective attention toward Black victims of fatal police violence following the death of George Floyd
Authors: Henry H. Wu, Ryan J. Gallagher, Thayer Alshaabi, Jane L. Adams, Joshua R. Minot, Michael V. Arnold, Brooke Foucault Welles, Randall Harp, Peter Sheridan Dodds, Christopher M. Danforth
Abstract: The murder of George Floyd by police in May 2020 sparked international protests and renewed attention to the Black Lives Matter movement. Here, we characterize ways in which the online activity following George Floyd's death was unparalleled in its volume and intensity, including setting records for activity on Twitter, prompting the saddest day in the platform's history, and causing George Floyd's name to appear among the ten most frequently used phrases in a day, where he is the only individual to have ever received that level of attention who was not known to the public earlier that same week. Further, we find this attention extended beyond George Floyd and that more Black victims of fatal police violence received attention following his death than during other past moments in Black Lives Matter's history. We place that attention within the context of prior online racial justice activism by showing how the names of Black victims of police violence have been lifted and memorialized over the last 12 years on Twitter. Our results suggest that the 2020 wave of attention to the Black Lives Matter movement centered past instances of police violence in an unprecedented way, demonstrating the impact of the movement's rhetorical strategy to "say their names."
Submitted 18 June, 2021; originally announced June 2021.
arXiv:2106.05260 [pdf, other] (stat.AP, cs.IR)
Sirius: Visualization of Mixed Features as a Mutual Information Network Graph
Authors: Jane L. Adams, Todd F. Deluca, Christopher M. Danforth, Peter S. Dodds, Yuhang Zheng, Konstantinos Anastasakis, Boyoon Choi, Allison Min, Michael M. Bessey
Abstract: Data scientists across disciplines are increasingly in need of exploratory analysis tools for data sets with a high volume of features of mixed data type (quantitative continuous and discrete categorical). We introduce Sirius, a novel visualization package for researchers to explore feature relationships among mixed data types using mutual information. The visualization of feature relationships aids data scientists in finding meaningful dependence among features prior to the development of predictive modeling pipelines, which can inform downstream analysis such as feature selection, feature extraction, and early detection of potential proxy variables. Using an information-theoretic approach, Sirius supports network visualization of heterogeneous data sets (consisting of continuous and discrete data types), and provides a user interface for exploring feature pairs with locally significant mutual information scores. Mutual information algorithms and bivariate chart types are assigned on a data-type pairing basis (continuous-continuous, discrete-discrete, and discrete-continuous). We show how this tool can be used for tasks such as hypothesis confirmation, identification of predictive features, suggestions for feature extraction, or early warning of data abnormalities. The accompanying website for this paper can be accessed at https://sirius.universalities.com/. All code and supplemental materials can be accessed at https://osf.io/pdm9r/.
Submitted 13 August, 2022; v1 submitted 9 June, 2021; originally announced June 2021.
ACM Class: H.5.2; J.0
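Sirius's pairing rule for mutual information estimators can be sketched with scikit-learn. The dispatch below is a minimal reading of that rule under assumed estimator choices, not the package's actual implementation.

import numpy as np
from sklearn.feature_selection import mutual_info_regression, mutual_info_classif
from sklearn.metrics import mutual_info_score

def pairwise_mi(x, y, x_discrete, y_discrete):
    # Dispatch an MI estimator based on the data-type pairing of two features.
    if x_discrete and y_discrete:
        return mutual_info_score(x, y)                           # discrete-discrete
    if not x_discrete and not y_discrete:
        return mutual_info_regression(x.reshape(-1, 1), y)[0]    # continuous-continuous
    cont, disc = (x, y) if y_discrete else (y, x)
    return mutual_info_classif(cont.reshape(-1, 1), disc)[0]     # mixed pairing

Feature pairs whose score clears a significance threshold would then become edges in the network graph.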
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> H.5.2; J.0 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.01481">arXiv:2106.01481</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.01481">pdf</a>, <a href="https://arxiv.org/format/2106.01481">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Quantifying language changes surrounding mental health on Twitter </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Stupinski%2C+A+M">Anne Marie Stupinski</a>, <a href="/search/cs?searchtype=author&amp;query=Alshaabi%2C+T">Thayer Alshaabi</a>, <a href="/search/cs?searchtype=author&amp;query=Arnold%2C+M+V">Michael V. Arnold</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J+L">Jane Lydia Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Minot%2C+J+R">Joshua R. Minot</a>, <a href="/search/cs?searchtype=author&amp;query=Price%2C+M">Matthew Price</a>, <a href="/search/cs?searchtype=author&amp;query=Dodds%2C+P+S">Peter Sheridan Dodds</a>, <a href="/search/cs?searchtype=author&amp;query=Danforth%2C+C+M">Christopher M. Danforth</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.01481v1-abstract-short" style="display: inline;"> Mental health challenges are thought to afflict around 10% of the global population each year, with many going untreated due to stigma and limited access to services. Here, we explore trends in words and phrases related to mental health through a collection of 1- , 2-, and 3-grams parsed from a data stream of roughly 10% of all English tweets since 2012. We examine temporal dynamics of mental heal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.01481v1-abstract-full').style.display = 'inline'; document.getElementById('2106.01481v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.01481v1-abstract-full" style="display: none;"> Mental health challenges are thought to afflict around 10% of the global population each year, with many going untreated due to stigma and limited access to services. Here, we explore trends in words and phrases related to mental health through a collection of 1- , 2-, and 3-grams parsed from a data stream of roughly 10% of all English tweets since 2012. We examine temporal dynamics of mental health language, finding that the popularity of the phrase &#39;mental health&#39; increased by nearly two orders of magnitude between 2012 and 2018. We observe that mentions of &#39;mental health&#39; spike annually and reliably due to mental health awareness campaigns, as well as unpredictably in response to mass shootings, celebrities dying by suicide, and popular fictional stories portraying suicide. 
We find that the level of positivity of messages containing 'mental health', while stable through the growth period, has declined recently. Finally, we use the ratio of original tweets to retweets to quantify the fraction of appearances of mental health language due to social amplification. Since 2015, mentions of mental health have become increasingly due to retweets, suggesting that stigma associated with discussion of mental health on Twitter has diminished with time.
Submitted 2 June, 2021; originally announced June 2021.
Comments: 12 pages, 5 figures, 1 table

arXiv:2105.13155 [pdf, other] (cs.AI) doi:10.1007/978-3-030-85469-0_25
A Framework for Explainable Concept Drift Detection in Process Mining
Authors: Jan Niklas Adams, Sebastiaan J. van Zelst, Lara Quack, Kathrin Hausmann, Wil M. P. van der Aalst, Thomas Rose
Abstract: Rapidly changing business environments expose companies to high levels of uncertainty. This uncertainty manifests itself in significant changes that tend to occur over the lifetime of a process and possibly affect its performance. It is important to understand the root causes of such changes since this allows us to react to change or anticipate future changes.
Research in process mining has so far only focused on detecting, locating and characterizing significant changes in a process and not on finding root causes of such changes. In this paper, we aim to close this gap. We propose a framework that adds an explainability level onto concept drift detection in process mining and provides insights into the cause-effect relationships behind significant changes. We define different perspectives of a process, detect concept drifts in these perspectives and plug the perspectives into a causality check that determines whether these concept drifts can be causal to each other. We showcase the effectiveness of our framework by evaluating it on both synthetic and real event data. Our experiments show that our approach unravels cause-effect relationships and provides novel insights into executed processes.
Submitted 27 May, 2021; originally announced May 2021.
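The abstract leaves the causality check unspecified; as one plausible instantiation (an assumption here, not necessarily the paper's check), the sketch below tests whether the drift signal of one process perspective Granger-causes that of another using statsmodels. The perspective time series and lag choice are illustrative.

import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests

def drifts_can_be_causal(effect_series, cause_series, maxlag=4, alpha=0.05):
    # True if cause_series helps predict effect_series at some lag; this is
    # a necessary, not sufficient, condition for an actual causal link.
    data = np.column_stack([effect_series, cause_series])
    results = grangercausalitytests(data, maxlag=maxlag, verbose=False)
    p_values = [res[0]["ssr_ftest"][1] for res in results.values()]
    return min(p_values) < alpha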
arXiv:2105.12006 [pdf, other] (cs.SI, cs.CL)
The incel lexicon: Deciphering the emergent cryptolect of a global misogynistic community
Authors: Kelly Gothard, David Rushing Dewhurst, Joshua R. Minot, Jane Lydia Adams, Christopher M. Danforth, Peter Sheridan Dodds
Abstract: Evolving out of a gender-neutral framing of an involuntary celibate identity, the concept of 'incels' has come to refer to an online community of men who bear antipathy towards themselves, women, and society-at-large for their perceived inability to find and maintain sexual relationships. By exploring incel language use on Reddit, a global online message board, we contextualize the incel community's online expressions of misogyny and real-world acts of violence perpetrated against women. After assembling around three million comments from incel-themed Reddit channels, we analyze the temporal dynamics of a data-driven rank ordering of the glossary of phrases belonging to an emergent incel lexicon. Our study reveals the generation and normalization of an extensive coded misogynist vocabulary in service of the group's identity.
Submitted 25 May, 2021; originally announced May 2021.
Comments: 18 pages, 11 figures
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 11 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.11589">arXiv:2101.11589</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.11589">pdf</a>, <a href="https://arxiv.org/format/2101.11589">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1088/1748-0221/16/07/P07041">10.1088/1748-0221/16/07/P07041 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A Convolutional Neural Network based Cascade Reconstruction for the IceCube Neutrino Observatory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Abbasi%2C+R">R. Abbasi</a>, <a href="/search/cs?searchtype=author&amp;query=Ackermann%2C+M">M. Ackermann</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+J">J. Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Aguilar%2C+J+A">J. A. Aguilar</a>, <a href="/search/cs?searchtype=author&amp;query=Ahlers%2C+M">M. Ahlers</a>, <a href="/search/cs?searchtype=author&amp;query=Ahrens%2C+M">M. Ahrens</a>, <a href="/search/cs?searchtype=author&amp;query=Alispach%2C+C">C. Alispach</a>, <a href="/search/cs?searchtype=author&amp;query=Alves%2C+A+A">A. A. Alves Jr.</a>, <a href="/search/cs?searchtype=author&amp;query=Amin%2C+N+M">N. M. Amin</a>, <a href="/search/cs?searchtype=author&amp;query=An%2C+R">R. An</a>, <a href="/search/cs?searchtype=author&amp;query=Andeen%2C+K">K. Andeen</a>, <a href="/search/cs?searchtype=author&amp;query=Anderson%2C+T">T. Anderson</a>, <a href="/search/cs?searchtype=author&amp;query=Ansseau%2C+I">I. Ansseau</a>, <a href="/search/cs?searchtype=author&amp;query=Anton%2C+G">G. Anton</a>, <a href="/search/cs?searchtype=author&amp;query=Arg%C3%BCelles%2C+C">C. Arg眉elles</a>, <a href="/search/cs?searchtype=author&amp;query=Axani%2C+S">S. Axani</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+X">X. Bai</a>, <a href="/search/cs?searchtype=author&amp;query=V.%2C+A+B">A. Balagopal V.</a>, <a href="/search/cs?searchtype=author&amp;query=Barbano%2C+A">A. Barbano</a>, <a href="/search/cs?searchtype=author&amp;query=Barwick%2C+S+W">S. W. Barwick</a>, <a href="/search/cs?searchtype=author&amp;query=Bastian%2C+B">B. Bastian</a>, <a href="/search/cs?searchtype=author&amp;query=Basu%2C+V">V. Basu</a>, <a href="/search/cs?searchtype=author&amp;query=Baum%2C+V">V. Baum</a>, <a href="/search/cs?searchtype=author&amp;query=Baur%2C+S">S. Baur</a>, <a href="/search/cs?searchtype=author&amp;query=Bay%2C+R">R. Bay</a> , et al. 
(343 additional authors not shown)
Abstract: Continued improvements on existing reconstruction methods are vital to the success of high-energy physics experiments, such as the IceCube Neutrino Observatory. In IceCube, further challenges arise as the detector is situated at the geographic South Pole where computational resources are limited. However, to perform real-time analyses and to issue alerts to telescopes around the world, powerful and fast reconstruction methods are desired. Deep neural networks can be extremely powerful, and their usage is computationally inexpensive once the networks are trained. These characteristics make a deep learning-based approach an excellent candidate for application in IceCube. A reconstruction method based on convolutional architectures and hexagonally shaped kernels is presented. The presented method is robust towards systematic uncertainties in the simulation and has been tested on experimental data. In comparison to standard reconstruction methods in IceCube, it can improve upon the reconstruction accuracy, while reducing the time necessary to run the reconstruction by two to three orders of magnitude.
Submitted 26 July, 2021; v1 submitted 27 January, 2021; originally announced January 2021.
Comments: 39 pages, 15 figures, submitted to Journal of Instrumentation; added references
Journal ref: JINST 16 (2021) P07041
512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>
