CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;21 of 21 results for author: <span class="mathjax">Müller, J P</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Müller, J P"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=M%C3%BCller%2C+J+P&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Müller, J P"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.14038">arXiv:2406.14038</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.14038">pdf</a>, <a href="https://arxiv.org/format/2406.14038">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Resource-efficient Medical Image Analysis with Self-adapting Forward-Forward Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Johanna P. 
Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.14038v2-abstract-short" style="display: inline;"> We introduce a fast Self-adapting Forward-Forward Network (SaFF-Net) for medical imaging analysis, mitigating power consumption and resource limitations, which currently primarily stem from the prevalent reliance on back-propagation for model training and fine-tuning. Building upon the recently proposed Forward-Forward Algorithm (FFA), we introduce the Convolutional Forward-Forward Algorithm (CFFA&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.14038v2-abstract-full').style.display = 'inline'; document.getElementById('2406.14038v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.14038v2-abstract-full" style="display: none;"> We introduce a fast Self-adapting Forward-Forward Network (SaFF-Net) for medical imaging analysis, mitigating power consumption and resource limitations, which currently primarily stem from the prevalent reliance on back-propagation for model training and fine-tuning. Building upon the recently proposed Forward-Forward Algorithm (FFA), we introduce the Convolutional Forward-Forward Algorithm (CFFA), a parameter-efficient reformulation that is suitable for advanced image analysis and overcomes the speed and generalisation constraints of the original FFA. To address hyper-parameter sensitivity of FFAs we are also introducing a self-adapting framework SaFF-Net fine-tuning parameters during warmup and training in parallel. Our approach enables more effective model training and eliminates the previously essential requirement for an arbitrarily chosen Goodness function in FFA. 
We evaluate our approach on several benchmarking datasets in comparison with standard Back-Propagation (BP) neural networks showing that FFA-based networks with notably fewer parameters and function evaluations can compete with standard models, especially, in one-shot scenarios and large batch sizes. The code will be available at the time of the conference. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.14038v2-abstract-full').style.display = 'none'; document.getElementById('2406.14038v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for MICCAI Workshop MLMI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.20705">arXiv:2405.20705</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.20705">pdf</a>, <a href="https://arxiv.org/format/2405.20705">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.24963/ijcai.2024/875">10.24963/ijcai.2024/875 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> ADESSE: Advice Explanations in Complex Repeated 
Decision-Making Environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Schleibaum%2C+S">Sören Schleibaum</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+L">Lu Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Kraus%2C+S">Sarit Kraus</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.20705v2-abstract-short" style="display: inline;"> In the evolving landscape of human-centered AI, fostering a synergistic relationship between humans and AI agents in decision-making processes stands as a paramount challenge. This work considers a problem setup where an intelligent agent comprising a neural network-based prediction component and a deep reinforcement learning component provides advice to a human decision-maker in complex repeated&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.20705v2-abstract-full').style.display = 'inline'; document.getElementById('2405.20705v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.20705v2-abstract-full" style="display: none;"> In the evolving landscape of human-centered AI, fostering a synergistic relationship between humans and AI agents in decision-making processes stands as a paramount challenge. This work considers a problem setup where an intelligent agent comprising a neural network-based prediction component and a deep reinforcement learning component provides advice to a human decision-maker in complex repeated decision-making environments. 
Whether the human decision-maker would follow the agent&#39;s advice depends on their beliefs and trust in the agent and on their understanding of the advice itself. To this end, we developed an approach named ADESSE to generate explanations about the adviser agent to improve human trust and decision-making. Computational experiments on a range of environments with varying model sizes demonstrate the applicability and scalability of ADESSE. Furthermore, an interactive game-based user study shows that participants were significantly more satisfied, achieved a higher reward in the game, and took less time to select an action when presented with explanations generated by ADESSE. These findings illuminate the critical role of tailored, human-centered explanations in AI-assisted decision-making. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.20705v2-abstract-full').style.display = 'none'; document.getElementById('2405.20705v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.18645">arXiv:2311.18645</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.18645">pdf</a>, <a href="https://arxiv.org/format/2311.18645">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Stochastic Vision Transformers with Wasserstein Distance-Aware Attention </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Erick%2C+F+X">Franciskus Xaverius Erick</a>, <a href="/search/cs?searchtype=author&amp;query=Rezaei%2C+M">Mina Rezaei</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Johanna Paula Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.18645v1-abstract-short" style="display: inline;"> Self-supervised learning is one of the most promising approaches to acquiring knowledge from limited labeled data. Despite the substantial advancements made in recent years, self-supervised models have posed a challenge to practitioners, as they do not readily provide insight into the model&#39;s confidence and uncertainty. 
Tackling this issue is no simple feat, primarily due to the complexity involve&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.18645v1-abstract-full').style.display = 'inline'; document.getElementById('2311.18645v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.18645v1-abstract-full" style="display: none;"> Self-supervised learning is one of the most promising approaches to acquiring knowledge from limited labeled data. Despite the substantial advancements made in recent years, self-supervised models have posed a challenge to practitioners, as they do not readily provide insight into the model&#39;s confidence and uncertainty. Tackling this issue is no simple feat, primarily due to the complexity involved in implementing techniques that can make use of the latent representations learned during pre-training without relying on explicit labels. Motivated by this, we introduce a new stochastic vision transformer that integrates uncertainty and distance awareness into self-supervised learning (SSL) pipelines. Instead of the conventional deterministic vector embedding, our novel stochastic vision transformer encodes image patches into elliptical Gaussian distributional embeddings. Notably, the attention matrices of these stochastic representational embeddings are computed using Wasserstein distance-based attention, effectively capitalizing on the distributional nature of these embeddings. Additionally, we propose a regularization term based on Wasserstein distance for both pre-training and fine-tuning processes, thereby incorporating distance awareness into latent representations. We perform extensive experiments across different tasks such as in-distribution generalization, out-of-distribution detection, dataset corruption, semi-supervised settings, and transfer learning to other datasets and tasks. 
Our proposed method achieves superior accuracy and calibration, surpassing the self-supervised baseline in a wide range of experiments on a variety of datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.18645v1-abstract-full').style.display = 'none'; document.getElementById('2311.18645v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.04187">arXiv:2310.04187</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.04187">pdf</a>, <a href="https://arxiv.org/format/2310.04187">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-031-44992-5_2">10.1007/978-3-031-44992-5_2 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Whole Slide Multiple Instance Learning for Predicting Axillary Lymph Node Metastasis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shk%C3%ABmbi%2C+G">Glejdis Shkëmbi</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Johanna P. 
Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhe Li</a>, <a href="/search/cs?searchtype=author&amp;query=Breininger%2C+K">Katharina Breininger</a>, <a href="/search/cs?searchtype=author&amp;query=Sch%C3%BCffler%2C+P">Peter Schüffler</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.04187v1-abstract-short" style="display: inline;"> Breast cancer is a major concern for women&#39;s health globally, with axillary lymph node (ALN) metastasis identification being critical for prognosis evaluation and treatment guidance. This paper presents a deep learning (DL) classification pipeline for quantifying clinical information from digital core-needle biopsy (CNB) images, with one step less than existing methods. A publicly available datase&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.04187v1-abstract-full').style.display = 'inline'; document.getElementById('2310.04187v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.04187v1-abstract-full" style="display: none;"> Breast cancer is a major concern for women&#39;s health globally, with axillary lymph node (ALN) metastasis identification being critical for prognosis evaluation and treatment guidance. This paper presents a deep learning (DL) classification pipeline for quantifying clinical information from digital core-needle biopsy (CNB) images, with one step less than existing methods. A publicly available dataset of 1058 patients was used to evaluate the performance of different baseline state-of-the-art (SOTA) DL models in classifying ALN metastatic status based on CNB images. An extensive ablation study of various data augmentation techniques was also conducted. 
Finally, the manual tumor segmentation and annotation step performed by the pathologists was assessed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.04187v1-abstract-full').style.display = 'none'; document.getElementById('2310.04187v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for MICCAI DEMI Workshop 2023</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Data Engineering in Medical Imaging. DEMI 2023. Lecture Notes in Computer Science, vol 14314. Springer, Cham </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.00899">arXiv:2307.00899</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.00899">pdf</a>, <a href="https://arxiv.org/format/2307.00899">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Many tasks make light work: Learning to localise medical anomalies from multiple synthetic tasks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Baugh%2C+M">Matthew Baugh</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+J">Jeremy Tan</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Johanna P. 
Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Dombrowski%2C+M">Mischa Dombrowski</a>, <a href="/search/cs?searchtype=author&amp;query=Batten%2C+J">James Batten</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.00899v1-abstract-short" style="display: inline;"> There is a growing interest in single-class modelling and out-of-distribution detection as fully supervised machine learning models cannot reliably identify classes not included in their training. The long tail of infinitely many out-of-distribution classes in real-world scenarios, e.g., for screening, triage, and quality control, means that it is often necessary to train single-class models that&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.00899v1-abstract-full').style.display = 'inline'; document.getElementById('2307.00899v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.00899v1-abstract-full" style="display: none;"> There is a growing interest in single-class modelling and out-of-distribution detection as fully supervised machine learning models cannot reliably identify classes not included in their training. The long tail of infinitely many out-of-distribution classes in real-world scenarios, e.g., for screening, triage, and quality control, means that it is often necessary to train single-class models that represent an expected feature distribution, e.g., from only strictly healthy volunteer data. Conventional supervised machine learning would require the collection of datasets that contain enough samples of all possible diseases in every imaging modality, which is not realistic. 
Self-supervised learning methods with synthetic anomalies are currently amongst the most promising approaches, alongside generative auto-encoders that analyse the residual reconstruction error. However, all methods suffer from a lack of structured validation, which makes calibration for deployment difficult and dataset-dependant. Our method alleviates this by making use of multiple visually-distinct synthetic anomaly learning tasks for both training and validation. This enables more robust training and generalisation. With our approach we can readily outperform state-of-the-art methods, which we demonstrate on exemplars in brain MRI and chest X-rays. Code is available at https://github.com/matt-baugh/many-tasks-make-light-work . <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.00899v1-abstract-full').style.display = 'none'; document.getElementById('2307.00899v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Early accepted to MICCAI 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.09269">arXiv:2306.09269</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.09269">pdf</a>, <a href="https://arxiv.org/format/2306.09269">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Zero-Shot Anomaly Detection with Pre-trained Segmentation Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Baugh%2C+M">Matthew Baugh</a>, <a href="/search/cs?searchtype=author&amp;query=Batten%2C+J">James Batten</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Johanna P. Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.09269v1-abstract-short" style="display: inline;"> This technical report outlines our submission to the zero-shot track of the Visual Anomaly and Novelty Detection (VAND) 2023 Challenge. Building on the performance of the WINCLIP framework, we aim to enhance the system&#39;s localization capabilities by integrating zero-shot segmentation models. 
In addition, we perform foreground instance segmentation which enables the model to focus on the relevant p&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.09269v1-abstract-full').style.display = 'inline'; document.getElementById('2306.09269v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.09269v1-abstract-full" style="display: none;"> This technical report outlines our submission to the zero-shot track of the Visual Anomaly and Novelty Detection (VAND) 2023 Challenge. Building on the performance of the WINCLIP framework, we aim to enhance the system&#39;s localization capabilities by integrating zero-shot segmentation models. In addition, we perform foreground instance segmentation which enables the model to focus on the relevant parts of the image, thus allowing the models to better identify small or subtle deviations. Our pipeline requires no external data or information, allowing for it to be directly applied to new datasets. Our team (Variance Vigilance Vanguard) ranked third in the zero-shot track of the VAND challenge, and achieve an average F1-max score of 81.5/24.2 at a sample/pixel level on the VisA dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.09269v1-abstract-full').style.display = 'none'; document.getElementById('2306.09269v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Ranked 3rd in zero-shot track of the Visual Anomaly and Novelty Detection (VAND) 2023 Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.17908">arXiv:2303.17908</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.17908">pdf</a>, <a href="https://arxiv.org/format/2303.17908">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Trade-offs in Fine-tuned Diffusion Models Between Accuracy and Interpretability </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dombrowski%2C+M">Mischa Dombrowski</a>, <a href="/search/cs?searchtype=author&amp;query=Reynaud%2C+H">Hadrien Reynaud</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Johanna P. Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Baugh%2C+M">Matthew Baugh</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.17908v2-abstract-short" style="display: inline;"> Recent advancements in diffusion models have significantly impacted the trajectory of generative machine learning research, with many adopting the strategy of fine-tuning pre-trained models using domain-specific text-to-image datasets. Notably, this method has been readily employed for medical applications, such as X-ray image synthesis, leveraging the plethora of associated radiology reports. 
Yet&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.17908v2-abstract-full').style.display = 'inline'; document.getElementById('2303.17908v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.17908v2-abstract-full" style="display: none;"> Recent advancements in diffusion models have significantly impacted the trajectory of generative machine learning research, with many adopting the strategy of fine-tuning pre-trained models using domain-specific text-to-image datasets. Notably, this method has been readily employed for medical applications, such as X-ray image synthesis, leveraging the plethora of associated radiology reports. Yet, a prevailing concern is the lack of assurance on whether these models genuinely comprehend their generated content. With the evolution of text-conditional image generation, these models have grown potent enough to facilitate object localization scrutiny. Our research underscores this advancement in the critical realm of medical imaging, emphasizing the crucial role of interpretability. We further unravel a consequential trade-off between image fidelity as gauged by conventional metrics and model interpretability in generative diffusion models. Specifically, the adoption of learnable text encoders when fine-tuning results in diminished interpretability. Our in-depth exploration uncovers the underlying factors responsible for this divergence. Consequently, we present a set of design principles for the development of truly interpretable generative models. Code is available at https://github.com/MischaD/chest-distillation. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.17908v2-abstract-full').style.display = 'none'; document.getElementById('2303.17908v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.13227">arXiv:2303.13227</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.13227">pdf</a>, <a href="https://arxiv.org/format/2303.13227">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Confidence-Aware and Self-Supervised Image Anomaly Localisation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Johanna P. 
Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Baugh%2C+M">Matthew Baugh</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+J">Jeremy Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Dombrowski%2C+M">Mischa Dombrowski</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.13227v2-abstract-short" style="display: inline;"> Universal anomaly detection still remains a challenging problem in machine learning and medical image analysis. It is possible to learn an expected distribution from a single class of normative samples, e.g., through epistemic uncertainty estimates, auto-encoding models, or from synthetic anomalies in a self-supervised way. The performance of self-supervised anomaly detection approaches is still i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.13227v2-abstract-full').style.display = 'inline'; document.getElementById('2303.13227v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.13227v2-abstract-full" style="display: none;"> Universal anomaly detection still remains a challenging problem in machine learning and medical image analysis. It is possible to learn an expected distribution from a single class of normative samples, e.g., through epistemic uncertainty estimates, auto-encoding models, or from synthetic anomalies in a self-supervised way. The performance of self-supervised anomaly detection approaches is still inferior compared to methods that use examples from known unknown classes to shape the decision boundary. However, outlier exposure methods often do not identify unknown unknowns. 
Here we discuss an improved self-supervised single-class training strategy that supports the approximation of probabilistic inference with loosen feature locality constraints. We show that up-scaling of gradients with histogram-equalised images is beneficial for recently proposed self-supervision tasks. Our method is integrated into several out-of-distribution (OOD) detection models and we show evidence that our method outperforms the state-of-the-art on various benchmark datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.13227v2-abstract-full').style.display = 'none'; document.getElementById('2303.13227v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for MICCAI UNSURE Workshop 2023 (Spotlight)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.12305">arXiv:2209.12305</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.12305">pdf</a>, <a href="https://arxiv.org/format/2209.12305">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Adnexal Mass Segmentation with Ultrasound Data Synthesis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lebbos%2C+C">Clara Lebbos</a>, <a href="/search/cs?searchtype=author&amp;query=Barcroft%2C+J">Jen Barcroft</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+J">Jeremy Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Muller%2C+J+P">Johanna P. Muller</a>, <a href="/search/cs?searchtype=author&amp;query=Baugh%2C+M">Matthew Baugh</a>, <a href="/search/cs?searchtype=author&amp;query=Vlontzos%2C+A">Athanasios Vlontzos</a>, <a href="/search/cs?searchtype=author&amp;query=Saso%2C+S">Srdjan Saso</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.12305v1-abstract-short" style="display: inline;"> Ovarian cancer is the most lethal gynaecological malignancy. 
The disease is most commonly asymptomatic at its early stages and its diagnosis relies on expert evaluation of transvaginal ultrasound images. Ultrasound is the first-line imaging modality for characterising adnexal masses, it requires significant expertise and its analysis is subjective and labour-intensive, therefore open to error. Hen&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.12305v1-abstract-full').style.display = 'inline'; document.getElementById('2209.12305v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.12305v1-abstract-full" style="display: none;"> Ovarian cancer is the most lethal gynaecological malignancy. The disease is most commonly asymptomatic at its early stages and its diagnosis relies on expert evaluation of transvaginal ultrasound images. Ultrasound is the first-line imaging modality for characterising adnexal masses, it requires significant expertise and its analysis is subjective and labour-intensive, therefore open to error. Hence, automating processes to facilitate and standardise the evaluation of scans is desired in clinical practice. Using supervised learning, we have demonstrated that segmentation of adnexal masses is possible, however, prevalence and label imbalance restricts the performance on under-represented classes. To mitigate this we apply a novel pathology-specific data synthesiser. We create synthetic medical images with their corresponding ground truth segmentations by using Poisson image editing to integrate less common masses into other samples. Our approach achieves the best performance across all classes, including an improvement of up to 8% when compared with nnU-Net baseline approaches. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.12305v1-abstract-full').style.display = 'none'; document.getElementById('2209.12305v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ASMUS 2022, LNCS 13565, p. 106, 2022 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.01124">arXiv:2209.01124</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.01124">pdf</a>, <a href="https://arxiv.org/format/2209.01124">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> nnOOD: A Framework for Benchmarking Self-supervised Anomaly Localisation Methods </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Baugh%2C+M">Matthew Baugh</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+J">Jeremy Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Vlontzos%2C+A">Athanasios Vlontzos</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Johanna P. 
Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Kainz%2C+B">Bernhard Kainz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.01124v1-abstract-short" style="display: inline;"> The wide variety of in-distribution and out-of-distribution data in medical imaging makes universal anomaly detection a challenging task. Recently a number of self-supervised methods have been developed that train end-to-end models on healthy data augmented with synthetic anomalies. However, it is difficult to compare these methods as it is not clear whether gains in performance are from the task&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.01124v1-abstract-full').style.display = 'inline'; document.getElementById('2209.01124v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.01124v1-abstract-full" style="display: none;"> The wide variety of in-distribution and out-of-distribution data in medical imaging makes universal anomaly detection a challenging task. Recently a number of self-supervised methods have been developed that train end-to-end models on healthy data augmented with synthetic anomalies. However, it is difficult to compare these methods as it is not clear whether gains in performance are from the task itself or the training pipeline around it. It is also difficult to assess whether a task generalises well for universal anomaly detection, as they are often only tested on a limited range of anomalies. To assist with this we have developed nnOOD, a framework that adapts nnU-Net to allow for comparison of self-supervised anomaly localisation methods. 
By isolating the synthetic, self-supervised task from the rest of the training process we perform a more faithful comparison of the tasks, whilst also making the workflow for evaluating over a given dataset quick and easy. Using this we have implemented the current state-of-the-art tasks and evaluated them on a challenging X-ray dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.01124v1-abstract-full').style.display = 'none'; document.getElementById('2209.01124v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted as Spotlight to UNSURE 2022, a workshop at MICCAI 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.09438">arXiv:2203.09438</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.09438">pdf</a>, <a href="https://arxiv.org/format/2203.09438">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> An Explainable Stacked Ensemble Model for Static Route-Free Estimation of Time of Arrival </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Schleibaum%2C+S">Sören Schleibaum</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. 
Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Sester%2C+M">Monika Sester</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.09438v2-abstract-short" style="display: inline;"> To compare alternative taxi schedules and to compute them, as well as to provide insights into an upcoming taxi trip to drivers and passengers, the duration of a trip or its Estimated Time of Arrival (ETA) is predicted. To reach a high prediction precision, machine learning models for ETA are state of the art. One yet unexploited option to further increase prediction precision is to combine multip&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.09438v2-abstract-full').style.display = 'inline'; document.getElementById('2203.09438v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.09438v2-abstract-full" style="display: none;"> To compare alternative taxi schedules and to compute them, as well as to provide insights into an upcoming taxi trip to drivers and passengers, the duration of a trip or its Estimated Time of Arrival (ETA) is predicted. To reach a high prediction precision, machine learning models for ETA are state of the art. One yet unexploited option to further increase prediction precision is to combine multiple ETA models into an ensemble. While an increase of prediction precision is likely, the main drawback is that the predictions made by such an ensemble become less transparent due to the sophisticated ensemble architecture. One option to remedy this drawback is to apply eXplainable Artificial Intelligence (XAI). The contribution of this paper is three-fold. 
First, we combine multiple machine learning models from our previous work for ETA into a two-level ensemble model - a stacked ensemble model - which on its own is novel; therefore, we can outperform previous state-of-the-art static route-free ETA approaches. Second, we apply existing XAI methods to explain the first- and second-level models of the ensemble. Third, we propose three joining methods for combining the first-level explanations with the second-level ones. Those joining methods enable us to explain stacked ensembles for regression tasks. An experimental evaluation shows that the ETA models correctly learned the importance of those input features driving the prediction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.09438v2-abstract-full').style.display = 'none'; document.getElementById('2203.09438v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.13419">arXiv:2202.13419</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.13419">pdf</a>, <a href="https://arxiv.org/format/2202.13419">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> On Intercultural Transferability and Calibration of Heterogeneous Shared Space Motion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Johora%2C+F+T">Fatema T. 
Johora</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.13419v1-abstract-short" style="display: inline;"> Modelling and simulation of mixed-traffic zones is an important tool for transportation planners to assess safety, efficiency, and human-friendliness of future urban areas. This paper addresses problems of calibration and transferability of existing shared space models when applied to scenarios that differ in terms of cultural aspects, traffic conditions, and spatial layout. In particular, the fir&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.13419v1-abstract-full').style.display = 'inline'; document.getElementById('2202.13419v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.13419v1-abstract-full" style="display: none;"> Modelling and simulation of mixed-traffic zones is an important tool for transportation planners to assess safety, efficiency, and human-friendliness of future urban areas. This paper addresses problems of calibration and transferability of existing shared space models when applied to scenarios that differ in terms of cultural aspects, traffic conditions, and spatial layout. In particular, the first contribution of this work is an enhancement of the Game-Theoretic Social Force Model (GSFM) by a generic methodology for largely automated model calibration; we illustrate the use of the calibration method for a shared space environment in Germany. The second contribution is an investigation into transferability of shared space models. 
We define criteria for model transferability and present a case study, in which we analyse and evaluate transferability of the model we constructed based on the ``German dataset&#39;&#39; to a different shared space environment from China. Our results indicate that although -- as to be expected -- the model faces difficulties to replicate the movement behaviours of road users from a new environment, by adding social norms (derived through analysis) of that environment to our model, satisfactory improvement of model accuracy can be obtained with limited effort. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.13419v1-abstract-full').style.display = 'none'; document.getElementById('2202.13419v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Transportation Letters, 2021 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.13410">arXiv:2202.13410</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.13410">pdf</a>, <a href="https://arxiv.org/format/2202.13410">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Investigating the Role of Pedestrian Groups in Shared Spaces through Simulation Modeling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ahmed%2C+S">Suhair Ahmed</a>, <a href="/search/cs?searchtype=author&amp;query=Johora%2C+F+T">Fatema T. 
Johora</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.13410v1-abstract-short" style="display: inline;"> In shared space environments, urban space is shared among different types of road users, who frequently interact with each other to negotiate priority and coordinate their trajectories. Instead of traffic rules, interactions among them are conducted by informal rules like speed limitations and by social protocols e.g., courtesy behavior. Social groups (socially related road users who walk together&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.13410v1-abstract-full').style.display = 'inline'; document.getElementById('2202.13410v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.13410v1-abstract-full" style="display: none;"> In shared space environments, urban space is shared among different types of road users, who frequently interact with each other to negotiate priority and coordinate their trajectories. Instead of traffic rules, interactions among them are conducted by informal rules like speed limitations and by social protocols e.g., courtesy behavior. Social groups (socially related road users who walk together) are an essential phenomenon in shared spaces and affect the safety and efficiency of such environments. To replicate group phenomena and systematically study their influence in shared spaces; realistic models of social groups and the integration of these models into shared space simulations are required. In this work, we focus on pedestrian groups and adopt an extended version of the social force model in conjunction with a game-theoretic model to simulate their movements. 
The novelty of our paper is in the modeling of interactions between social groups and vehicles. We validate our model by simulating scenarios involving interaction between social groups and also group-to-vehicle interaction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.13410v1-abstract-full').style.display = 'none'; document.getElementById('2202.13410v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> International Workshop on Simulation Science, p. 52 to 69, 2019, Springer </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.02791">arXiv:2202.02791</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.02791">pdf</a>, <a href="https://arxiv.org/format/2202.02791">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> SFMGNet: A Physics-based Neural Network To Predict Pedestrian Trajectories </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hossain%2C+S">Sakif Hossain</a>, <a href="/search/cs?searchtype=author&amp;query=Johora%2C+F+T">Fatema T. Johora</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. 
Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Hartmann%2C+S">Sven Hartmann</a>, <a href="/search/cs?searchtype=author&amp;query=Reinhardt%2C+A">Andreas Reinhardt</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.02791v1-abstract-short" style="display: inline;"> Autonomous robots and vehicles are expected to soon become an integral part of our environment. Unsatisfactory issues regarding interaction with existing road users, performance in mixed-traffic areas and lack of interpretable behavior remain key obstacles. To address these, we present a physics-based neural network, based on a hybrid approach combining a social force model extended by group force&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.02791v1-abstract-full').style.display = 'inline'; document.getElementById('2202.02791v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.02791v1-abstract-full" style="display: none;"> Autonomous robots and vehicles are expected to soon become an integral part of our environment. Unsatisfactory issues regarding interaction with existing road users, performance in mixed-traffic areas and lack of interpretable behavior remain key obstacles. To address these, we present a physics-based neural network, based on a hybrid approach combining a social force model extended by group force (SFMG) with Multi-Layer Perceptron (MLP) to predict pedestrian trajectories considering its interaction with static obstacles, other pedestrians and pedestrian groups. We quantitatively and qualitatively evaluate the model with respect to realistic prediction, prediction performance and prediction &#34;interpretability&#34;. 
Initial results suggest that the model, even when solely trained on a synthetic dataset, can predict realistic and interpretable trajectories with better than state-of-the-art accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.02791v1-abstract-full').style.display = 'none'; document.getElementById('2202.02791v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 6 figures, AAAI-MAKE 2022: Machine Learning and Knowledge Engineering for Hybrid Intelligence</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.09188">arXiv:2110.09188</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.09188">pdf</a>, <a href="https://arxiv.org/ps/2110.09188">ps</a>, <a href="https://arxiv.org/format/2110.09188">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Ride Sharing &amp; Data Privacy: An Analysis of the State of Practice </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hesselmann%2C+C">Carsten Hesselmann</a>, <a href="/search/cs?searchtype=author&amp;query=Gertheiss%2C+J">Jan Gertheiss</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. 
Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.09188v2-abstract-short" style="display: inline;"> Digital services like ride sharing rely heavily on personal data as individuals have to disclose personal information in order to gain access to the market and exchange their information with other participants; yet, the service provider usually gives little to no information regarding the privacy status of the disclosed information though privacy concerns are a decisive factor for individuals to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.09188v2-abstract-full').style.display = 'inline'; document.getElementById('2110.09188v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.09188v2-abstract-full" style="display: none;"> Digital services like ride sharing rely heavily on personal data as individuals have to disclose personal information in order to gain access to the market and exchange their information with other participants; yet, the service provider usually gives little to no information regarding the privacy status of the disclosed information though privacy concerns are a decisive factor for individuals to (not) use these services. We analyzed how popular ride sharing services handle user privacy to assess the current state of practice. The results show that services include a varying set of personal data and offer limited privacy-related features. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.09188v2-abstract-full').style.display = 'none'; document.getElementById('2110.09188v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.02083">arXiv:2107.02083</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.02083">pdf</a>, <a href="https://arxiv.org/format/2107.02083">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Modeling Interactions of Multimodal Road Users in Shared Spaces </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Johora%2C+F+T">Fatema T. Johora</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.02083v1-abstract-short" style="display: inline;"> In shared spaces, motorized and non-motorized road users share the same space with equal priority. Their movements are not regulated by traffic rules, hence they interact more frequently to negotiate priority over the shared space. To estimate the safeness and efficiency of shared spaces, reproducing the traffic behavior in such traffic places is important. 
In this paper, we consider and combine d&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.02083v1-abstract-full').style.display = 'inline'; document.getElementById('2107.02083v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.02083v1-abstract-full" style="display: none;"> In shared spaces, motorized and non-motorized road users share the same space with equal priority. Their movements are not regulated by traffic rules, hence they interact more frequently to negotiate priority over the shared space. To estimate the safeness and efficiency of shared spaces, reproducing the traffic behavior in such traffic places is important. In this paper, we consider and combine different levels of interaction between pedestrians and cars in shared space environments. Our proposed model consists of three layers: a layer to plan trajectories of road users; a force-based modeling layer to reproduce free flow movement and simple interactions; and a game-theoretic decision layer to handle complex situations where road users need to make a decision over different alternatives. We validate our model by simulating scenarios involving various interactions between pedestrians and cars and also car-to-car interaction. The results indicate that simulated behaviors match observed behaviors well. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.02083v1-abstract-full').style.display = 'none'; document.getElementById('2107.02083v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE, 2018, https://ieeexplore.ieee.org/document/8569687 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.06974">arXiv:2101.06974</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.06974">pdf</a>, <a href="https://arxiv.org/format/2101.06974">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> </div> </div> <p class="title is-5 mathjax"> On the Generalizability of Motion Models for Road Users in Heterogeneous Shared Traffic Spaces </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Johora%2C+F+T">Fatema T. Johora</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+D">Dongfang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. Müller</a>, <a href="/search/cs?searchtype=author&amp;query=%C3%96zg%C3%BCner%2C+%C3%9C">Ümit Özgüner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.06974v1-abstract-short" style="display: inline;"> Modeling mixed-traffic motion and interactions is crucial to assess safety, efficiency, and feasibility of future urban areas. The lack of traffic regulations, diverse transport modes, and the dynamic nature of mixed-traffic zones like shared spaces make realistic modeling of such environments challenging. 
This paper focuses on the generalizability of the motion model, i.e., its ability to generat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.06974v1-abstract-full').style.display = 'inline'; document.getElementById('2101.06974v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.06974v1-abstract-full" style="display: none;"> Modeling mixed-traffic motion and interactions is crucial to assess safety, efficiency, and feasibility of future urban areas. The lack of traffic regulations, diverse transport modes, and the dynamic nature of mixed-traffic zones like shared spaces make realistic modeling of such environments challenging. This paper focuses on the generalizability of the motion model, i.e., its ability to generate realistic behavior in different environmental settings, an aspect which is lacking in existing works. Specifically, our first contribution is a novel and systematic process of formulating general motion models and application of this process is to extend our Game-Theoretic Social Force Model (GSFM) towards a general model for generating a large variety of motion behaviors of pedestrians and cars from different shared spaces. Our second contribution is to consider different motion patterns of pedestrians by calibrating motion-related features of individual pedestrian and clustering them into groups. We analyze two clustering approaches. The calibration and evaluation of our model are performed on three different shared space data sets. The results indicate that our model can realistically simulate a wide range of motion behaviors and interaction scenarios, and that adding different motion patterns of pedestrians into our model improves its performance. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.06974v1-abstract-full').style.display = 'none'; document.getElementById('2101.06974v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.03554">arXiv:2101.03554</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.03554">pdf</a>, <a href="https://arxiv.org/format/2101.03554">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Sub-Goal Social Force Model for Collective Pedestrian Motion Under Vehicle Influence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+D">Dongfang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Johora%2C+F+T">Fatema T. Johora</a>, <a href="/search/cs?searchtype=author&amp;query=Redmill%2C+K+A">Keith A. Redmill</a>, <a href="/search/cs?searchtype=author&amp;query=%C3%96zg%C3%BCner%2C+%C3%9C">Ümit Özgüner</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.03554v1-abstract-short" style="display: inline;"> In mixed traffic scenarios, a certain number of pedestrians might coexist in a small area while interacting with vehicles. In this situation, every pedestrian must simultaneously react to the surrounding pedestrians and vehicles. 
Analytical modeling of such collective pedestrian motion can benefit intelligent transportation practices like shared space design and urban autonomous driving. This work&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.03554v1-abstract-full').style.display = 'inline'; document.getElementById('2101.03554v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.03554v1-abstract-full" style="display: none;"> In mixed traffic scenarios, a certain number of pedestrians might coexist in a small area while interacting with vehicles. In this situation, every pedestrian must simultaneously react to the surrounding pedestrians and vehicles. Analytical modeling of such collective pedestrian motion can benefit intelligent transportation practices like shared space design and urban autonomous driving. This work proposed the sub-goal social force model (SG-SFM) to describe the collective pedestrian motion under vehicle influence. The proposed model introduced a new design of vehicle influence on pedestrian motion, which was smoothly combined with the influence of surrounding pedestrians using the sub-goal concept. This model aims to describe generalized pedestrian motion, i.e., it is applicable to various vehicle-pedestrian interaction patterns. The generalization was verified by both quantitative and qualitative evaluation. The quantitative evaluation was conducted to reproduce pedestrian motion in three different datasets, HBS, CITR, and DUT. It also compared two different ways of calibrating the model parameters. The qualitative evaluation examined the simulation of collective pedestrian motion in a series of fundamental vehicle-pedestrian interaction scenarios. The above evaluation results demonstrated the effectiveness of the proposed model. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.03554v1-abstract-full').style.display = 'none'; document.getElementById('2101.03554v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to IEEE Transactions on Intelligent Transportation Systems</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1912.08929">arXiv:1912.08929</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1912.08929">pdf</a>, <a href="https://arxiv.org/format/1912.08929">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-68028-2_14">10.1007/978-3-030-68028-2_14 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> PFaRA: a Platoon Forming and Routing Algorithm for Same-Day Deliveries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sebe%2C+S">Sînziana-Maria Sebe</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. 
Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1912.08929v1-abstract-short" style="display: inline;"> Platoons, vehicles that travel very close together acting as one, promise to improve road usage on freeways and city roads alike. We study platoon formation in the context of same-day delivery in urban environments. Multiple self-interested logistic service providers (LSP) carry out same-day deliveries by deploying autonomous electric vehicles that are capable of forming and traveling in platoons.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.08929v1-abstract-full').style.display = 'inline'; document.getElementById('1912.08929v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1912.08929v1-abstract-full" style="display: none;"> Platoons, vehicles that travel very close together acting as one, promise to improve road usage on freeways and city roads alike. We study platoon formation in the context of same-day delivery in urban environments. Multiple self-interested logistic service providers (LSP) carry out same-day deliveries by deploying autonomous electric vehicles that are capable of forming and traveling in platoons. The novel aspect that we consider in our research is heterogeneity of platoons in the sense that vehicles are equipped with different capabilities and constraints, and belong to different providers. Our aim is to examine how these platoons can form and their potential properties and benefits. We present a platoon forming and routing algorithm, called PFaRA, that finds longest common routes for multiple vehicles, while also respecting vehicle preferences and constraints. PFaRA consists of two parts, a speed clustering step and a linear optimisation step. 
To test the approach, a simulation was used, working with realistic urban network data and background traffic models. Our results showed that the performance of our approach is comparable to a simple route-matching one, but it leads to better utility values for vehicles and by extension the LSPs. We show that the grouping provided is viable and provides benefits to all vehicles participating in the platoon. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.08929v1-abstract-full').style.display = 'none'; document.getElementById('1912.08929v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to &#34;Communications in Computer and Information Science&#34; published by Springer</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Communications in Computer and Information Science, vol 1217 (2021) pages: 297--320 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1910.04404">arXiv:1910.04404</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1910.04404">pdf</a>, <a href="https://arxiv.org/format/1910.04404">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" 
href="https://doi.org/10.1609/aaai.v34i09.7077">10.1609/aaai.v34i09.7077 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> AI for Explaining Decisions in Multi-Agent Environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kraus%2C+S">Sarit Kraus</a>, <a href="/search/cs?searchtype=author&amp;query=Azaria%2C+A">Amos Azaria</a>, <a href="/search/cs?searchtype=author&amp;query=Fiosina%2C+J">Jelena Fiosina</a>, <a href="/search/cs?searchtype=author&amp;query=Greve%2C+M">Maike Greve</a>, <a href="/search/cs?searchtype=author&amp;query=Hazon%2C+N">Noam Hazon</a>, <a href="/search/cs?searchtype=author&amp;query=Kolbe%2C+L">Lutz Kolbe</a>, <a href="/search/cs?searchtype=author&amp;query=Lembcke%2C+T">Tim-Benjamin Lembcke</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. Müller</a>, <a href="/search/cs?searchtype=author&amp;query=Schleibaum%2C+S">Sören Schleibaum</a>, <a href="/search/cs?searchtype=author&amp;query=Vollrath%2C+M">Mark Vollrath</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1910.04404v2-abstract-short" style="display: inline;"> Explanation is necessary for humans to understand and accept decisions made by an AI system when the system&#39;s goal is known. It is even more important when the AI system makes decisions in multi-agent environments where the human does not know the systems&#39; goals since they may depend on other agents&#39; preferences. 
In such situations, explanations should aim to increase user satisfaction, taking int&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.04404v2-abstract-full').style.display = 'inline'; document.getElementById('1910.04404v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1910.04404v2-abstract-full" style="display: none;"> Explanation is necessary for humans to understand and accept decisions made by an AI system when the system&#39;s goal is known. It is even more important when the AI system makes decisions in multi-agent environments where the human does not know the systems&#39; goals since they may depend on other agents&#39; preferences. In such situations, explanations should aim to increase user satisfaction, taking into account the system&#39;s decision, the user&#39;s and the other agents&#39; preferences, the environment settings and properties such as fairness, envy and privacy. Generating explanations that will increase user satisfaction is very challenging; to this end, we propose a new research direction: xMASE. We then review the state of the art and discuss research directions towards efficient methodologies and algorithms for generating explanations that will increase users&#39; satisfaction from AI system&#39;s decisions in multi-agent environments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.04404v2-abstract-full').style.display = 'none'; document.getElementById('1910.04404v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper has been submitted to the Blue Sky Track of the AAAI 2020 conference. At the time of submission, it is under review. The tentative notification date will be November 10, 2019. Current version: Name of first author had been added in metadata</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1709.08235">arXiv:1709.08235</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1709.08235">pdf</a>, <a href="https://arxiv.org/format/1709.08235">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Dynamic Path Planning and Movement Control in Pedestrian Simulation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Johora%2C+F+T">Fatema Tuj Johora</a>, <a href="/search/cs?searchtype=author&amp;query=Kraus%2C+P">Philipp Kraus</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%BCller%2C+J+P">Jörg P. Müller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1709.08235v1-abstract-short" style="display: inline;"> Modeling and simulation of pedestrian behavior is used in applications such as planning large buildings, disaster management, or urban planning. Realistically simulating pedestrian behavior is challenging, due to the complexity of individual behavior as well as the complexity of interactions of pedestrians with each other and with the environment. 
This work-in-progress paper addresses the tactical&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.08235v1-abstract-full').style.display = 'inline'; document.getElementById('1709.08235v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1709.08235v1-abstract-full" style="display: none;"> Modeling and simulation of pedestrian behavior is used in applications such as planning large buildings, disaster management, or urban planning. Realistically simulating pedestrian behavior is challenging, due to the complexity of individual behavior as well as the complexity of interactions of pedestrians with each other and with the environment. This work-in-progress paper addresses the tactical (path planning) and the operational level (movement control) of pedestrian simulation from the perspective of multiagent-based modeling. We propose (1) a novel extension of the JPS routing algorithm for tactical planning, and (2) an architecture how path planning can be integrated with a social-force based movement control. The architecture is inspired by layered architectures for robot planning and control. We validate correctness and efficiency of our approach through simulation runs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.08235v1-abstract-full').style.display = 'none'; document.getElementById('1709.08235v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 September, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2017. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper was accepted for the preproceedings of The 2nd International Workshop on Agent-based modelling of urban systems (ABMUS 2017), http://www.modelling-urban-systems.com/</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.11; I.2.0 </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click 
here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 
41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10