CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;11 of 11 results for author: <span class="mathjax">Kirby, J</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Kirby%2C+J">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Kirby, J"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Kirby%2C+J&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Kirby, J"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.11202">arXiv:2407.11202</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.11202">pdf</a>, <a href="https://arxiv.org/format/2407.11202">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Actuation without production bias </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kirby%2C+J">James Kirby</a>, <a href="/search/cs?searchtype=author&amp;query=Sonderegger%2C+M">Morgan Sonderegger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.11202v1-abstract-short" style="display: inline;"> Phonetic production bias is the external force most commonly invoked in computational models of sound change, despite the fact that it is not responsible for all, or even most, sound changes. Furthermore, the existence of production bias alone cannot account for how changes do or do not propagate throughout a speech community. While many other factors have been invoked by (socio)phoneticians, incl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.11202v1-abstract-full').style.display = 'inline'; document.getElementById('2407.11202v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.11202v1-abstract-full" style="display: none;"> Phonetic production bias is the external force most commonly invoked in computational models of sound change, despite the fact that it is not responsible for all, or even most, sound changes. Furthermore, the existence of production bias alone cannot account for how changes do or do not propagate throughout a speech community. While many other factors have been invoked by (socio)phoneticians, including but not limited to contact (between subpopulations) and differences in social evaluation (of variants, groups, or individuals), these are not typically modeled in computational simulations of sound change. In this paper, we consider whether production biases have a unique dynamics in terms of how they impact the population-level spread of change in a setting where agents learn from multiple teachers. We show that, while the dynamics conditioned by production bias are not unique, it is not the case that all perturbing forces have the same dynamics: in particular, if social weight is a function of individual teachers and the correlation between a teacher&#39;s social weight and the extent to which they realize a production bias is weak, change is unlikely to propagate. Nevertheless, it remains the case that changes initiated from different sources may display a similar dynamics. A more nuanced understanding of how population structure interacts with individual biases can thus provide a (partial) solution to the `non-phonologization problem&#39;. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.11202v1-abstract-full').style.display = 'none'; document.getElementById('2407.11202v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint of chapter to be published in _Speech Dynamics: Synchronic Variation and Diachronic Change_</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.09011">arXiv:2305.09011</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.09011">pdf</a>, <a href="https://arxiv.org/format/2305.09011">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> The Brain Tumor Segmentation (BraTS) Challenge 2023: Brain MR Image Synthesis for Tumor Segmentation (BraSyn) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+H+B">Hongwei Bran Li</a>, <a href="/search/cs?searchtype=author&amp;query=Conte%2C+G+M">Gian Marco Conte</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Q">Qingqiao Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Anwar%2C+S+M">Syed Muhammad Anwar</a>, <a href="/search/cs?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a>, <a href="/search/cs?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/cs?searchtype=author&amp;query=van+Leemput%2C+K">Koen van Leemput</a>, <a href="/search/cs?searchtype=author&amp;query=Piraud%2C+M">Marie Piraud</a>, <a href="/search/cs?searchtype=author&amp;query=Diaz%2C+M">Maria Diaz</a>, <a href="/search/cs?searchtype=author&amp;query=Cole%2C+B">Byrone Cole</a>, <a href="/search/cs?searchtype=author&amp;query=Calabrese%2C+E">Evan Calabrese</a>, <a href="/search/cs?searchtype=author&amp;query=Rudie%2C+J">Jeff Rudie</a>, <a href="/search/cs?searchtype=author&amp;query=Meissen%2C+F">Felix Meissen</a>, <a href="/search/cs?searchtype=author&amp;query=Adewole%2C+M">Maruf Adewole</a>, <a href="/search/cs?searchtype=author&amp;query=Janas%2C+A">Anastasia Janas</a>, <a href="/search/cs?searchtype=author&amp;query=Kazerooni%2C+A+F">Anahita Fathi Kazerooni</a>, <a href="/search/cs?searchtype=author&amp;query=LaBella%2C+D">Dominic LaBella</a>, <a href="/search/cs?searchtype=author&amp;query=Moawad%2C+A+W">Ahmed W. Moawad</a>, <a href="/search/cs?searchtype=author&amp;query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/cs?searchtype=author&amp;query=Eddy%2C+J">James Eddy</a>, <a href="/search/cs?searchtype=author&amp;query=Bergquist%2C+T">Timothy Bergquist</a>, <a href="/search/cs?searchtype=author&amp;query=Chung%2C+V">Verena Chung</a>, <a href="/search/cs?searchtype=author&amp;query=Shinohara%2C+R+T">Russell Takeshi Shinohara</a>, <a href="/search/cs?searchtype=author&amp;query=Dako%2C+F">Farouk Dako</a>, <a href="/search/cs?searchtype=author&amp;query=Wiggins%2C+W">Walter Wiggins</a> , et al. (44 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.09011v6-abstract-short" style="display: inline;"> Automated brain tumor segmentation methods have become well-established and reached performance levels offering clear clinical utility. These methods typically rely on four input magnetic resonance imaging (MRI) modalities: T1-weighted images with and without contrast enhancement, T2-weighted images, and FLAIR images. However, some sequences are often missing in clinical practice due to time const&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.09011v6-abstract-full').style.display = 'inline'; document.getElementById('2305.09011v6-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.09011v6-abstract-full" style="display: none;"> Automated brain tumor segmentation methods have become well-established and reached performance levels offering clear clinical utility. These methods typically rely on four input magnetic resonance imaging (MRI) modalities: T1-weighted images with and without contrast enhancement, T2-weighted images, and FLAIR images. However, some sequences are often missing in clinical practice due to time constraints or image artifacts, such as patient motion. Consequently, the ability to substitute missing modalities and gain segmentation performance is highly desirable and necessary for the broader adoption of these algorithms in the clinical routine. In this work, we present the establishment of the Brain MR Image Synthesis Benchmark (BraSyn) in conjunction with the Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2023. The primary objective of this challenge is to evaluate image synthesis methods that can realistically generate missing MRI modalities when multiple available images are provided. The ultimate aim is to facilitate automated brain tumor segmentation pipelines. The image dataset used in the benchmark is diverse and multi-modal, created through collaboration with various hospitals and research institutions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.09011v6-abstract-full').style.display = 'none'; document.getElementById('2305.09011v6-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Technical report of BraSyn</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.08992">arXiv:2305.08992</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.08992">pdf</a>, <a href="https://arxiv.org/format/2305.08992">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> The Brain Tumor Segmentation (BraTS) Challenge: Local Synthesis of Healthy Brain Tissue via Inpainting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a>, <a href="/search/cs?searchtype=author&amp;query=Meissen%2C+F">Felix Meissen</a>, <a href="/search/cs?searchtype=author&amp;query=Steinbauer%2C+F">Felix Steinbauer</a>, <a href="/search/cs?searchtype=author&amp;query=Graf%2C+R">Robert Graf</a>, <a href="/search/cs?searchtype=author&amp;query=Ehrlich%2C+S+K">Stefan K Ehrlich</a>, <a href="/search/cs?searchtype=author&amp;query=Reinke%2C+A">Annika Reinke</a>, <a href="/search/cs?searchtype=author&amp;query=Oswald%2C+E">Eva Oswald</a>, <a href="/search/cs?searchtype=author&amp;query=Waldmannstetter%2C+D">Diana Waldmannstetter</a>, <a href="/search/cs?searchtype=author&amp;query=Hoelzl%2C+F">Florian Hoelzl</a>, <a href="/search/cs?searchtype=author&amp;query=Horvath%2C+I">Izabela Horvath</a>, <a href="/search/cs?searchtype=author&amp;query=Turgut%2C+O">Oezguen Turgut</a>, <a href="/search/cs?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a href="/search/cs?searchtype=author&amp;query=Bukas%2C+C">Christina Bukas</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+K">Kaiyuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a>, <a href="/search/cs?searchtype=author&amp;query=de+da+Rosa%2C+E">Ezequiel de da Rosa</a>, <a href="/search/cs?searchtype=author&amp;query=Mekki%2C+I">Isra Mekki</a>, <a href="/search/cs?searchtype=author&amp;query=Vinayahalingam%2C+S">Shankeeth Vinayahalingam</a>, <a href="/search/cs?searchtype=author&amp;query=Kassem%2C+H">Hasan Kassem</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Juexin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+K">Ke Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Weng%2C+Y">Ying Weng</a>, <a href="/search/cs?searchtype=author&amp;query=Durrer%2C+A">Alicia Durrer</a>, <a href="/search/cs?searchtype=author&amp;query=Cattin%2C+P+C">Philippe C. Cattin</a>, <a href="/search/cs?searchtype=author&amp;query=Wolleb%2C+J">Julia Wolleb</a> , et al. (81 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.08992v3-abstract-short" style="display: inline;"> A myriad of algorithms for the automatic analysis of brain MR images is available to support clinicians in their decision-making. For brain tumor patients, the image acquisition time series typically starts with an already pathological scan. This poses problems, as many algorithms are designed to analyze healthy brains and provide no guarantee for images featuring lesions. Examples include, but ar&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.08992v3-abstract-full').style.display = 'inline'; document.getElementById('2305.08992v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.08992v3-abstract-full" style="display: none;"> A myriad of algorithms for the automatic analysis of brain MR images is available to support clinicians in their decision-making. For brain tumor patients, the image acquisition time series typically starts with an already pathological scan. This poses problems, as many algorithms are designed to analyze healthy brains and provide no guarantee for images featuring lesions. Examples include, but are not limited to, algorithms for brain anatomy parcellation, tissue segmentation, and brain extraction. To solve this dilemma, we introduce the BraTS inpainting challenge. Here, the participants explore inpainting techniques to synthesize healthy brain scans from lesioned ones. The following manuscript contains the task formulation, dataset, and submission procedure. Later, it will be updated to summarize the findings of the challenge. The challenge is organized as part of the ASNR-BraTS MICCAI challenge. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.08992v3-abstract-full').style.display = 'none'; document.getElementById('2305.08992v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.10473">arXiv:2303.10473</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.10473">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Report of the Medical Image De-Identification (MIDI) Task Group -- Best Practices and Recommendations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Clunie%2C+D+A">David A. Clunie</a>, <a href="/search/cs?searchtype=author&amp;query=Flanders%2C+A">Adam Flanders</a>, <a href="/search/cs?searchtype=author&amp;query=Taylor%2C+A">Adam Taylor</a>, <a href="/search/cs?searchtype=author&amp;query=Erickson%2C+B">Brad Erickson</a>, <a href="/search/cs?searchtype=author&amp;query=Bialecki%2C+B">Brian Bialecki</a>, <a href="/search/cs?searchtype=author&amp;query=Brundage%2C+D">David Brundage</a>, <a href="/search/cs?searchtype=author&amp;query=Gutman%2C+D">David Gutman</a>, <a href="/search/cs?searchtype=author&amp;query=Prior%2C+F">Fred Prior</a>, <a href="/search/cs?searchtype=author&amp;query=Seibert%2C+J+A">J Anthony Seibert</a>, <a href="/search/cs?searchtype=author&amp;query=Perry%2C+J">John Perry</a>, <a href="/search/cs?searchtype=author&amp;query=Gichoya%2C+J+W">Judy Wawira Gichoya</a>, <a href="/search/cs?searchtype=author&amp;query=Kirby%2C+J">Justin Kirby</a>, <a href="/search/cs?searchtype=author&amp;query=Andriole%2C+K">Katherine Andriole</a>, <a href="/search/cs?searchtype=author&amp;query=Geneslaw%2C+L">Luke Geneslaw</a>, <a href="/search/cs?searchtype=author&amp;query=Moore%2C+S">Steve Moore</a>, <a href="/search/cs?searchtype=author&amp;query=Fitzgerald%2C+T">TJ Fitzgerald</a>, <a href="/search/cs?searchtype=author&amp;query=Tellis%2C+W">Wyatt Tellis</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+Y">Ying Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Farahani%2C+K">Keyvan Farahani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.10473v2-abstract-short" style="display: inline;"> This report addresses the technical aspects of de-identification of medical images of human subjects and biospecimens, such that re-identification risk of ethical, moral, and legal concern is sufficiently reduced to allow unrestricted public sharing for any purpose, regardless of the jurisdiction of the source and distribution sites. All medical images, regardless of the mode of acquisition, are c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.10473v2-abstract-full').style.display = 'inline'; document.getElementById('2303.10473v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.10473v2-abstract-full" style="display: none;"> This report addresses the technical aspects of de-identification of medical images of human subjects and biospecimens, such that re-identification risk of ethical, moral, and legal concern is sufficiently reduced to allow unrestricted public sharing for any purpose, regardless of the jurisdiction of the source and distribution sites. All medical images, regardless of the mode of acquisition, are considered, though the primary emphasis is on those with accompanying data elements, especially those encoded in formats in which the data elements are embedded, particularly Digital Imaging and Communications in Medicine (DICOM). These images include image-like objects such as Segmentations, Parametric Maps, and Radiotherapy (RT) Dose objects. The scope also includes related non-image objects, such as RT Structure Sets, Plans and Dose Volume Histograms, Structured Reports, and Presentation States. Only de-identification of publicly released data is considered, and alternative approaches to privacy preservation, such as federated learning for artificial intelligence (AI) model development, are out of scope, as are issues of privacy leakage from AI model sharing. Only technical issues of public sharing are addressed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.10473v2-abstract-full').style.display = 'none'; document.getElementById('2303.10473v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">131 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.02701">arXiv:2211.02701</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.02701">pdf</a>, <a href="https://arxiv.org/format/2211.02701">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MONAI: An open-source framework for deep learning in healthcare </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cardoso%2C+M+J">M. Jorge Cardoso</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+W">Wenqi Li</a>, <a href="/search/cs?searchtype=author&amp;query=Brown%2C+R">Richard Brown</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+N">Nic Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Kerfoot%2C+E">Eric Kerfoot</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yiheng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Murrey%2C+B">Benjamin Murrey</a>, <a href="/search/cs?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Can Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+D">Dong Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Nath%2C+V">Vishwesh Nath</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+Y">Yufan He</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Z">Ziyue Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Hatamizadeh%2C+A">Ali Hatamizadeh</a>, <a href="/search/cs?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+W">Wentao Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yun Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+M">Mingxin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Y">Yucheng Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+I">Isaac Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zephyr%2C+M">Michael Zephyr</a>, <a href="/search/cs?searchtype=author&amp;query=Hashemian%2C+B">Behrooz Hashemian</a>, <a href="/search/cs?searchtype=author&amp;query=Alle%2C+S">Sachidanand Alle</a>, <a href="/search/cs?searchtype=author&amp;query=Darestani%2C+M+Z">Mohammad Zalbagi Darestani</a>, <a href="/search/cs?searchtype=author&amp;query=Budd%2C+C">Charlie Budd</a> , et al. (32 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.02701v1-abstract-short" style="display: inline;"> Artificial Intelligence (AI) is having a tremendous impact across most areas of science. Applications of AI in healthcare have the potential to improve our ability to detect, diagnose, prognose, and intervene on human disease. For AI models to be used clinically, they need to be made safe, reproducible and robust, and the underlying software framework must be aware of the particularities (e.g. geo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.02701v1-abstract-full').style.display = 'inline'; document.getElementById('2211.02701v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.02701v1-abstract-full" style="display: none;"> Artificial Intelligence (AI) is having a tremendous impact across most areas of science. Applications of AI in healthcare have the potential to improve our ability to detect, diagnose, prognose, and intervene on human disease. For AI models to be used clinically, they need to be made safe, reproducible and robust, and the underlying software framework must be aware of the particularities (e.g. geometry, physiology, physics) of medical data being processed. This work introduces MONAI, a freely available, community-supported, and consortium-led PyTorch-based framework for deep learning in healthcare. MONAI extends PyTorch to support medical data, with a particular focus on imaging, and provide purpose-specific AI model architectures, transformations and utilities that streamline the development and deployment of medical AI models. MONAI follows best practices for software-development, providing an easy-to-use, robust, well-documented, and well-tested software framework. MONAI preserves the simple, additive, and compositional approach of its underlying PyTorch libraries. MONAI is being used by and receiving contributions from research, clinical and industrial teams from around the world, who are pursuing applications spanning nearly every aspect of healthcare. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.02701v1-abstract-full').style.display = 'none'; document.getElementById('2211.02701v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">www.monai.io</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.00458">arXiv:2201.00458</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.00458">pdf</a>, <a href="https://arxiv.org/format/2201.00458">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Lung-Originated Tumor Segmentation from Computed Tomography Scan (LOTUS) Benchmark </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+P">Parnian Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Mohammadi%2C+A">Arash Mohammadi</a>, <a href="/search/cs?searchtype=author&amp;query=Plataniotis%2C+K+N">Konstantinos N. Plataniotis</a>, <a href="/search/cs?searchtype=author&amp;query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/cs?searchtype=author&amp;query=Kirby%2C+J">Justin Kirby</a>, <a href="/search/cs?searchtype=author&amp;query=Oikonomou%2C+A">Anastasia Oikonomou</a>, <a href="/search/cs?searchtype=author&amp;query=Asif%2C+A">Amir Asif</a>, <a href="/search/cs?searchtype=author&amp;query=Wee%2C+L">Leonard Wee</a>, <a href="/search/cs?searchtype=author&amp;query=Dekker%2C+A">Andre Dekker</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+X">Xin Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Haque%2C+M+A">Mohammad Ariful Haque</a>, <a href="/search/cs?searchtype=author&amp;query=Hossain%2C+S">Shahruk Hossain</a>, <a href="/search/cs?searchtype=author&amp;query=Hasan%2C+M+K">Md. Kamrul Hasan</a>, <a href="/search/cs?searchtype=author&amp;query=Kamal%2C+U">Uday Kamal</a>, <a href="/search/cs?searchtype=author&amp;query=Hsu%2C+W">Winston Hsu</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+J">Jhih-Yuan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Rahman%2C+M+S">M. Sohel Rahman</a>, <a href="/search/cs?searchtype=author&amp;query=Ibtehaz%2C+N">Nabil Ibtehaz</a>, <a href="/search/cs?searchtype=author&amp;query=Foisol%2C+S+M+A">Sh. M. Amir Foisol</a>, <a href="/search/cs?searchtype=author&amp;query=Lam%2C+K">Kin-Man Lam</a>, <a href="/search/cs?searchtype=author&amp;query=Guang%2C+Z">Zhong Guang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+R">Runze Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Channappayya%2C+S+S">Sumohana S. Channappayya</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+S">Shashank Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Dev%2C+C">Chander Dev</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.00458v1-abstract-short" style="display: inline;"> Lung cancer is one of the deadliest cancers, and in part its effective diagnosis and treatment depend on the accurate delineation of the tumor. Human-centered segmentation, which is currently the most common approach, is subject to inter-observer variability, and is also time-consuming, considering the fact that only experts are capable of providing annotations. Automatic and semi-automatic tumor&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.00458v1-abstract-full').style.display = 'inline'; document.getElementById('2201.00458v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.00458v1-abstract-full" style="display: none;"> Lung cancer is one of the deadliest cancers, and in part its effective diagnosis and treatment depend on the accurate delineation of the tumor. Human-centered segmentation, which is currently the most common approach, is subject to inter-observer variability, and is also time-consuming, considering the fact that only experts are capable of providing annotations. Automatic and semi-automatic tumor segmentation methods have recently shown promising results. However, as different researchers have validated their algorithms using various datasets and performance metrics, reliably evaluating these methods is still an open challenge. The goal of the Lung-Originated Tumor Segmentation from Computed Tomography Scan (LOTUS) Benchmark created through 2018 IEEE Video and Image Processing (VIP) Cup competition, is to provide a unique dataset and pre-defined metrics, so that different researchers can develop and evaluate their methods in a unified fashion. The 2018 VIP Cup started with a global engagement from 42 countries to access the competition data. At the registration stage, there were 129 members clustered into 28 teams from 10 countries, out of which 9 teams made it to the final stage and 6 teams successfully completed all the required tasks. In a nutshell, all the algorithms proposed during the competition, are based on deep learning models combined with a false positive reduction technique. Methods developed by the three finalists show promising results in tumor segmentation, however, more effort should be put into reducing the false positive rate. This competition manuscript presents an overview of the VIP-Cup challenge, along with the proposed algorithms and results. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.00458v1-abstract-full').style.display = 'none'; document.getElementById('2201.00458v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.10074">arXiv:2112.10074</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.10074">pdf</a>, <a href="https://arxiv.org/format/2112.10074">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.59275/j.melba.2022-354b">10.59275/j.melba.2022-354b <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> QU-BraTS: MICCAI BraTS 2020 Challenge on Quantifying Uncertainty in Brain Tumor Segmentation - Analysis of Ranking Scores and Benchmarking Results </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+R">Raghav Mehta</a>, <a href="/search/cs?searchtype=author&amp;query=Filos%2C+A">Angelos Filos</a>, <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Sako%2C+C">Chiharu Sako</a>, <a href="/search/cs?searchtype=author&amp;query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&amp;query=Rebsamen%2C+M">Michael Rebsamen</a>, <a href="/search/cs?searchtype=author&amp;query=Datwyler%2C+K">Katrin Datwyler</a>, <a href="/search/cs?searchtype=author&amp;query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&amp;query=Radojewski%2C+P">Piotr Radojewski</a>, <a href="/search/cs?searchtype=author&amp;query=Murugesan%2C+G+K">Gowtham Krishnan Murugesan</a>, <a href="/search/cs?searchtype=author&amp;query=Nalawade%2C+S">Sahil Nalawade</a>, <a href="/search/cs?searchtype=author&amp;query=Ganesh%2C+C">Chandan Ganesh</a>, <a href="/search/cs?searchtype=author&amp;query=Wagner%2C+B">Ben Wagner</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+F+F">Fang F. Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Fei%2C+B">Baowei Fei</a>, <a href="/search/cs?searchtype=author&amp;query=Madhuranthakam%2C+A+J">Ananth J. Madhuranthakam</a>, <a href="/search/cs?searchtype=author&amp;query=Maldjian%2C+J+A">Joseph A. Maldjian</a>, <a href="/search/cs?searchtype=author&amp;query=Daza%2C+L">Laura Daza</a>, <a href="/search/cs?searchtype=author&amp;query=Gomez%2C+C">Catalina Gomez</a>, <a href="/search/cs?searchtype=author&amp;query=Arbelaez%2C+P">Pablo Arbelaez</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+C">Chengliang Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shuo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Reynaud%2C+H">Hadrien Reynaud</a>, <a href="/search/cs?searchtype=author&amp;query=Mo%2C+Y">Yuan-han Mo</a>, <a href="/search/cs?searchtype=author&amp;query=Angelini%2C+E">Elsa Angelini</a> , et al. (67 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.10074v2-abstract-short" style="display: inline;"> Deep learning (DL) models have provided state-of-the-art performance in various medical imaging benchmarking challenges, including the Brain Tumor Segmentation (BraTS) challenges. However, the task of focal pathology multi-compartment segmentation (e.g., tumor and lesion sub-regions) is particularly challenging, and potential errors hinder translating DL models into clinical workflows. Quantifying&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.10074v2-abstract-full').style.display = 'inline'; document.getElementById('2112.10074v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.10074v2-abstract-full" style="display: none;"> Deep learning (DL) models have provided state-of-the-art performance in various medical imaging benchmarking challenges, including the Brain Tumor Segmentation (BraTS) challenges. However, the task of focal pathology multi-compartment segmentation (e.g., tumor and lesion sub-regions) is particularly challenging, and potential errors hinder translating DL models into clinical workflows. Quantifying the reliability of DL model predictions in the form of uncertainties could enable clinical review of the most uncertain regions, thereby building trust and paving the way toward clinical translation. Several uncertainty estimation methods have recently been introduced for DL medical image segmentation tasks. Developing scores to evaluate and compare the performance of uncertainty measures will assist the end-user in making more informed decisions. In this study, we explore and evaluate a score developed during the BraTS 2019 and BraTS 2020 task on uncertainty quantification (QU-BraTS) and designed to assess and rank uncertainty estimates for brain tumor multi-compartment segmentation. This score (1) rewards uncertainty estimates that produce high confidence in correct assertions and those that assign low confidence levels at incorrect assertions, and (2) penalizes uncertainty measures that lead to a higher percentage of under-confident correct assertions. We further benchmark the segmentation uncertainties generated by 14 independent participating teams of QU-BraTS 2020, all of which also participated in the main BraTS segmentation task. Overall, our findings confirm the importance and complementary value that uncertainty estimates provide to segmentation algorithms, highlighting the need for uncertainty quantification in medical image analyses. Finally, in favor of transparency and reproducibility, our evaluation code is made publicly available at: https://github.com/RagMeh11/QU-BraTS. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.10074v2-abstract-full').style.display = 'none'; document.getElementById('2112.10074v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at the Journal of Machine Learning for Biomedical Imaging (MELBA): https://www.melba-journal.org/papers/2022:026.html</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine.Learning.for.Biomedical.Imaging. 1 (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.02314">arXiv:2107.02314</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.02314">pdf</a>, <a href="https://arxiv.org/format/2107.02314">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> The RSNA-ASNR-MICCAI BraTS 2021 Benchmark on Brain Tumor Segmentation and Radiogenomic Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Ghodasara%2C+S">Satyam Ghodasara</a>, <a href="/search/cs?searchtype=author&amp;query=Mohan%2C+S">Suyash Mohan</a>, <a href="/search/cs?searchtype=author&amp;query=Bilello%2C+M">Michel Bilello</a>, <a href="/search/cs?searchtype=author&amp;query=Calabrese%2C+E">Evan Calabrese</a>, <a href="/search/cs?searchtype=author&amp;query=Colak%2C+E">Errol Colak</a>, <a href="/search/cs?searchtype=author&amp;query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/cs?searchtype=author&amp;query=Kalpathy-Cramer%2C+J">Jayashree Kalpathy-Cramer</a>, <a href="/search/cs?searchtype=author&amp;query=Kitamura%2C+F+C">Felipe C. Kitamura</a>, <a href="/search/cs?searchtype=author&amp;query=Pati%2C+S">Sarthak Pati</a>, <a href="/search/cs?searchtype=author&amp;query=Prevedello%2C+L+M">Luciano M. Prevedello</a>, <a href="/search/cs?searchtype=author&amp;query=Rudie%2C+J+D">Jeffrey D. Rudie</a>, <a href="/search/cs?searchtype=author&amp;query=Sako%2C+C">Chiharu Sako</a>, <a href="/search/cs?searchtype=author&amp;query=Shinohara%2C+R+T">Russell T. Shinohara</a>, <a href="/search/cs?searchtype=author&amp;query=Bergquist%2C+T">Timothy Bergquist</a>, <a href="/search/cs?searchtype=author&amp;query=Chai%2C+R">Rong Chai</a>, <a href="/search/cs?searchtype=author&amp;query=Eddy%2C+J">James Eddy</a>, <a href="/search/cs?searchtype=author&amp;query=Elliott%2C+J">Julia Elliott</a>, <a href="/search/cs?searchtype=author&amp;query=Reade%2C+W">Walter Reade</a>, <a href="/search/cs?searchtype=author&amp;query=Schaffter%2C+T">Thomas Schaffter</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+T">Thomas Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+J">Jiaxin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Moawad%2C+A+W">Ahmed W. Moawad</a>, <a href="/search/cs?searchtype=author&amp;query=Coelho%2C+L+O">Luiz Otavio Coelho</a>, <a href="/search/cs?searchtype=author&amp;query=McDonnell%2C+O">Olivia McDonnell</a> , et al. (78 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.02314v2-abstract-short" style="display: inline;"> The BraTS 2021 challenge celebrates its 10th anniversary and is jointly organized by the Radiological Society of North America (RSNA), the American Society of Neuroradiology (ASNR), and the Medical Image Computing and Computer Assisted Interventions (MICCAI) society. Since its inception, BraTS has been focusing on being a common benchmarking venue for brain glioma segmentation algorithms, with wel&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.02314v2-abstract-full').style.display = 'inline'; document.getElementById('2107.02314v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.02314v2-abstract-full" style="display: none;"> The BraTS 2021 challenge celebrates its 10th anniversary and is jointly organized by the Radiological Society of North America (RSNA), the American Society of Neuroradiology (ASNR), and the Medical Image Computing and Computer Assisted Interventions (MICCAI) society. Since its inception, BraTS has been focusing on being a common benchmarking venue for brain glioma segmentation algorithms, with well-curated multi-institutional multi-parametric magnetic resonance imaging (mpMRI) data. Gliomas are the most common primary malignancies of the central nervous system, with varying degrees of aggressiveness and prognosis. The RSNA-ASNR-MICCAI BraTS 2021 challenge targets the evaluation of computational algorithms assessing the same tumor compartmentalization, as well as the underlying tumor&#39;s molecular characterization, in pre-operative baseline mpMRI data from 2,040 patients. Specifically, the two tasks that BraTS 2021 focuses on are: a) the segmentation of the histologically distinct brain tumor sub-regions, and b) the classification of the tumor&#39;s O[6]-methylguanine-DNA methyltransferase (MGMT) promoter methylation status. The performance evaluation of all participating algorithms in BraTS 2021 will be conducted through the Sage Bionetworks Synapse platform (Task 1) and Kaggle (Task 2), concluding in distributing to the top ranked participants monetary awards of $60,000 collectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.02314v2-abstract-full').style.display = 'none'; document.getElementById('2107.02314v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages, 2 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.05874">arXiv:2105.05874</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2105.05874">pdf</a>, <a href="https://arxiv.org/format/2105.05874">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> The Federated Tumor Segmentation (FeTS) Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pati%2C+S">Sarthak Pati</a>, <a href="/search/cs?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&amp;query=Zenk%2C+M">Maximilian Zenk</a>, <a href="/search/cs?searchtype=author&amp;query=Edwards%2C+B">Brandon Edwards</a>, <a href="/search/cs?searchtype=author&amp;query=Sheller%2C+M">Micah Sheller</a>, <a href="/search/cs?searchtype=author&amp;query=Reina%2C+G+A">G. Anthony Reina</a>, <a href="/search/cs?searchtype=author&amp;query=Foley%2C+P">Patrick Foley</a>, <a href="/search/cs?searchtype=author&amp;query=Gruzdev%2C+A">Alexey Gruzdev</a>, <a href="/search/cs?searchtype=author&amp;query=Martin%2C+J">Jason Martin</a>, <a href="/search/cs?searchtype=author&amp;query=Albarqouni%2C+S">Shadi Albarqouni</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yong Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Shinohara%2C+R+T">Russell Taki Shinohara</a>, <a href="/search/cs?searchtype=author&amp;query=Reinke%2C+A">Annika Reinke</a>, <a href="/search/cs?searchtype=author&amp;query=Zimmerer%2C+D">David Zimmerer</a>, <a href="/search/cs?searchtype=author&amp;query=Freymann%2C+J+B">John B. Freymann</a>, <a href="/search/cs?searchtype=author&amp;query=Kirby%2C+J+S">Justin S. Kirby</a>, <a href="/search/cs?searchtype=author&amp;query=Davatzikos%2C+C">Christos Davatzikos</a>, <a href="/search/cs?searchtype=author&amp;query=Colen%2C+R+R">Rivka R. Colen</a>, <a href="/search/cs?searchtype=author&amp;query=Kotrotsou%2C+A">Aikaterini Kotrotsou</a>, <a href="/search/cs?searchtype=author&amp;query=Marcus%2C+D">Daniel Marcus</a>, <a href="/search/cs?searchtype=author&amp;query=Milchenko%2C+M">Mikhail Milchenko</a>, <a href="/search/cs?searchtype=author&amp;query=Nazeri%2C+A">Arash Nazeri</a>, <a href="/search/cs?searchtype=author&amp;query=Fathallah-Shaykh%2C+H">Hassan Fathallah-Shaykh</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Jakab%2C+A">Andras Jakab</a> , et al. (7 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.05874v2-abstract-short" style="display: inline;"> This manuscript describes the first challenge on Federated Learning, namely the Federated Tumor Segmentation (FeTS) challenge 2021. International challenges have become the standard for validation of biomedical image analysis methods. However, the actual performance of participating (even the winning) algorithms on &#34;real-world&#34; clinical data often remains unclear, as the data included in challenge&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.05874v2-abstract-full').style.display = 'inline'; document.getElementById('2105.05874v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.05874v2-abstract-full" style="display: none;"> This manuscript describes the first challenge on Federated Learning, namely the Federated Tumor Segmentation (FeTS) challenge 2021. International challenges have become the standard for validation of biomedical image analysis methods. However, the actual performance of participating (even the winning) algorithms on &#34;real-world&#34; clinical data often remains unclear, as the data included in challenges are usually acquired in very controlled settings at few institutions. The seemingly obvious solution of just collecting increasingly more data from more institutions in such challenges does not scale well due to privacy and ownership hurdles. Towards alleviating these concerns, we are proposing the FeTS challenge 2021 to cater towards both the development and the evaluation of models for the segmentation of intrinsically heterogeneous (in appearance, shape, and histology) brain tumors, namely gliomas. Specifically, the FeTS 2021 challenge uses clinically acquired, multi-institutional magnetic resonance imaging (MRI) scans from the BraTS 2020 challenge, as well as from various remote independent institutions included in the collaborative network of a real-world federation (https://www.fets.ai/). The goals of the FeTS challenge are directly represented by the two included tasks: 1) the identification of the optimal weight aggregation approach towards the training of a consensus model that has gained knowledge via federated learning from multiple geographically distinct institutions, while their data are always retained within each institution, and 2) the federated evaluation of the generalizability of brain tumor segmentation models &#34;in the wild&#34;, i.e. on data from institutional distributions that were not part of the training datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.05874v2-abstract-full').style.display = 'none'; document.getElementById('2105.05874v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.02629">arXiv:1811.02629</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1811.02629">pdf</a>, <a href="https://arxiv.org/format/1811.02629">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Identifying the Best Machine Learning Algorithms for Brain Tumor Segmentation, Progression Assessment, and Overall Survival Prediction in the BRATS Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/cs?searchtype=author&amp;query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&amp;query=Jakab%2C+A">Andras Jakab</a>, <a href="/search/cs?searchtype=author&amp;query=Bauer%2C+S">Stefan Bauer</a>, <a href="/search/cs?searchtype=author&amp;query=Rempfler%2C+M">Markus Rempfler</a>, <a href="/search/cs?searchtype=author&amp;query=Crimi%2C+A">Alessandro Crimi</a>, <a href="/search/cs?searchtype=author&amp;query=Shinohara%2C+R+T">Russell Takeshi Shinohara</a>, <a href="/search/cs?searchtype=author&amp;query=Berger%2C+C">Christoph Berger</a>, <a href="/search/cs?searchtype=author&amp;query=Ha%2C+S+M">Sung Min Ha</a>, <a href="/search/cs?searchtype=author&amp;query=Rozycki%2C+M">Martin Rozycki</a>, <a href="/search/cs?searchtype=author&amp;query=Prastawa%2C+M">Marcel Prastawa</a>, <a href="/search/cs?searchtype=author&amp;query=Alberts%2C+E">Esther Alberts</a>, <a href="/search/cs?searchtype=author&amp;query=Lipkova%2C+J">Jana Lipkova</a>, <a href="/search/cs?searchtype=author&amp;query=Freymann%2C+J">John Freymann</a>, <a href="/search/cs?searchtype=author&amp;query=Kirby%2C+J">Justin Kirby</a>, <a href="/search/cs?searchtype=author&amp;query=Bilello%2C+M">Michel Bilello</a>, <a href="/search/cs?searchtype=author&amp;query=Fathallah-Shaykh%2C+H">Hassan Fathallah-Shaykh</a>, <a href="/search/cs?searchtype=author&amp;query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&amp;query=Kirschke%2C+J">Jan Kirschke</a>, <a href="/search/cs?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&amp;query=Colen%2C+R">Rivka Colen</a>, <a href="/search/cs?searchtype=author&amp;query=Kotrotsou%2C+A">Aikaterini Kotrotsou</a>, <a href="/search/cs?searchtype=author&amp;query=Lamontagne%2C+P">Pamela Lamontagne</a>, <a href="/search/cs?searchtype=author&amp;query=Marcus%2C+D">Daniel Marcus</a>, <a href="/search/cs?searchtype=author&amp;query=Milchenko%2C+M">Mikhail Milchenko</a> , et al. (402 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.02629v3-abstract-short" style="display: inline;"> Gliomas are the most common primary brain malignancies, with different degrees of aggressiveness, variable prognosis and various heterogeneous histologic sub-regions, i.e., peritumoral edematous/invaded tissue, necrotic core, active and non-enhancing core. This intrinsic heterogeneity is also portrayed in their radio-phenotype, as their sub-regions are depicted by varying intensity profiles dissem&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.02629v3-abstract-full').style.display = 'inline'; document.getElementById('1811.02629v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.02629v3-abstract-full" style="display: none;"> Gliomas are the most common primary brain malignancies, with different degrees of aggressiveness, variable prognosis and various heterogeneous histologic sub-regions, i.e., peritumoral edematous/invaded tissue, necrotic core, active and non-enhancing core. This intrinsic heterogeneity is also portrayed in their radio-phenotype, as their sub-regions are depicted by varying intensity profiles disseminated across multi-parametric magnetic resonance imaging (mpMRI) scans, reflecting varying biological properties. Their heterogeneous shape, extent, and location are some of the factors that make these tumors difficult to resect, and in some cases inoperable. The amount of resected tumor is a factor also considered in longitudinal scans, when evaluating the apparent tumor for potential diagnosis of progression. Furthermore, there is mounting evidence that accurate segmentation of the various tumor sub-regions can offer the basis for quantitative image analysis towards prediction of patient overall survival. This study assesses the state-of-the-art machine learning (ML) methods used for brain tumor image analysis in mpMRI scans, during the last seven instances of the International Brain Tumor Segmentation (BraTS) challenge, i.e., 2012-2018. Specifically, we focus on i) evaluating segmentations of the various glioma sub-regions in pre-operative mpMRI scans, ii) assessing potential tumor progression by virtue of longitudinal growth of tumor sub-regions, beyond use of the RECIST/RANO criteria, and iii) predicting the overall survival from pre-operative mpMRI scans of patients that underwent gross total resection. Finally, we investigate the challenge of identifying the best ML algorithms for each of these tasks, considering that apart from being diverse on each instance of the challenge, the multi-institutional mpMRI BraTS dataset has also been a continuously evolving/growing dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.02629v3-abstract-full').style.display = 'none'; document.getElementById('1811.02629v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The International Multimodal Brain Tumor Segmentation (BraTS) Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1507.04420">arXiv:1507.04420</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1507.04420">pdf</a>, <a href="https://arxiv.org/format/1507.04420">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> Bias and population structure in the actuation of sound change </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kirby%2C+J">James Kirby</a>, <a href="/search/cs?searchtype=author&amp;query=Sonderegger%2C+M">Morgan Sonderegger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1507.04420v1-abstract-short" style="display: inline;"> Why do human languages change at some times, and not others? We address this longstanding question from a computational perspective, focusing on the case of sound change. Sound change arises from the pronunciation variability ubiquitous in every speech community, but most such variability does not lead to change. Hence, an adequate model must allow for stability as well as change. Existing theorie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1507.04420v1-abstract-full').style.display = 'inline'; document.getElementById('1507.04420v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1507.04420v1-abstract-full" style="display: none;"> Why do human languages change at some times, and not others? We address this longstanding question from a computational perspective, focusing on the case of sound change. Sound change arises from the pronunciation variability ubiquitous in every speech community, but most such variability does not lead to change. Hence, an adequate model must allow for stability as well as change. Existing theories of sound change tend to emphasize factors at the level of individual learners promoting one outcome or the other, such as channel bias (which favors change) or inductive bias (which favors stability). Here, we consider how the interaction of these biases can lead to both stability and change in a population setting. We find that population structure itself can act as a source of stability, but that both stability and change are possible only when both types of bias are active, suggesting that it is possible to understand why sound change occurs at some times and not others as the population-level result of the interplay between forces promoting each outcome in individual speakers. In addition, if it is assumed that learners learn from two or more teachers, the transition from stability to change is marked by a phase transition, consistent with the abrupt transitions seen in many empirical cases of sound change. The predictions of multiple-teacher models thus match empirical cases of sound change better than the predictions of single-teacher models, underscoring the importance of modeling language change in a population setting. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1507.04420v1-abstract-full').style.display = 'none'; document.getElementById('1507.04420v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 July, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2015. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 pages, 7 figures</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10