CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;12 of 12 results for author: <span class="mathjax">Roth, H R</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/eess" aria-role="search"> Searching in archive <strong>eess</strong>. <a href="/search/?searchtype=author&amp;query=Roth%2C+H+R">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Roth, H R"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Roth%2C+H+R&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Roth, H R"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.13632">arXiv:2407.13632</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.13632">pdf</a>, <a href="https://arxiv.org/format/2407.13632">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Data Alchemy: Mitigating Cross-Site Model Variability Through Test Time Data Calibration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Parida%2C+A">Abhijeet Parida</a>, <a href="/search/eess?searchtype=author&amp;query=Alomar%2C+A">Antonia Alomar</a>, <a href="/search/eess?searchtype=author&amp;query=Jiang%2C+Z">Zhifan Jiang</a>, <a href="/search/eess?searchtype=author&amp;query=Roshanitabrizi%2C+P">Pooneh Roshanitabrizi</a>, <a href="/search/eess?searchtype=author&amp;query=Tapp%2C+A">Austin Tapp</a>, <a href="/search/eess?searchtype=author&amp;query=Ledesma-Carbayo%2C+M">Maria Ledesma-Carbayo</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+Z">Ziyue Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Anwar%2C+S+M">Syed Muhammed Anwar</a>, <a href="/search/eess?searchtype=author&amp;query=Linguraru%2C+M+G">Marius George Linguraru</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.13632v1-abstract-short" style="display: inline;"> Deploying deep learning-based imaging tools across various clinical sites poses significant challenges due to inherent domain shifts and regulatory hurdles associated with site-specific fine-tuning. For histopathology, stain normalization techniques can mitigate discrepancies, but they often fall short of eliminating inter-site variations. Therefore, we present Data Alchemy, an explainable stain n&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.13632v1-abstract-full').style.display = 'inline'; document.getElementById('2407.13632v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.13632v1-abstract-full" style="display: none;"> Deploying deep learning-based imaging tools across various clinical sites poses significant challenges due to inherent domain shifts and regulatory hurdles associated with site-specific fine-tuning. For histopathology, stain normalization techniques can mitigate discrepancies, but they often fall short of eliminating inter-site variations. Therefore, we present Data Alchemy, an explainable stain normalization method combined with test time data calibration via a template learning framework to overcome barriers in cross-site analysis. Data Alchemy handles shifts inherent to multi-site data and minimizes them without needing to change the weights of the normalization or classifier networks. Our approach extends to unseen sites in various clinical settings where data domain discrepancies are unknown. Extensive experiments highlight the efficacy of our framework in tumor classification in hematoxylin and eosin-stained patches. Our explainable normalization method boosts classification tasks&#39; area under the precision-recall curve(AUPR) by 0.165, 0.545 to 0.710. Additionally, Data Alchemy further reduces the multisite classification domain gap, by improving the 0.710 AUPR an additional 0.142, elevating classification performance further to 0.852, from 0.545. Our Data Alchemy framework can popularize precision medicine with minimal operational overhead by allowing for the seamless integration of pre-trained deep learning-based clinical tools across multiple sites. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.13632v1-abstract-full').style.display = 'none'; document.getElementById('2407.13632v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted to Machine Learning in Medical Imaging (MLMI 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.02604">arXiv:2407.02604</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.02604">pdf</a>, <a href="https://arxiv.org/format/2407.02604">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> D-Rax: Domain-specific Radiologic assistant leveraging multi-modal data and eXpert model predictions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Nisar%2C+H">Hareem Nisar</a>, <a href="/search/eess?searchtype=author&amp;query=Anwar%2C+S+M">Syed Muhammad Anwar</a>, <a href="/search/eess?searchtype=author&amp;query=Jiang%2C+Z">Zhifan Jiang</a>, <a href="/search/eess?searchtype=author&amp;query=Parida%2C+A">Abhijeet Parida</a>, <a href="/search/eess?searchtype=author&amp;query=Sanchez-Jacob%2C+R">Ramon Sanchez-Jacob</a>, <a href="/search/eess?searchtype=author&amp;query=Nath%2C+V">Vishwesh Nath</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Linguraru%2C+M+G">Marius George Linguraru</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.02604v2-abstract-short" style="display: inline;"> Large vision language models (VLMs) have progressed incredibly from research to applicability for general-purpose use cases. LLaVA-Med, a pioneering large language and vision assistant for biomedicine, can perform multi-modal biomedical image and data analysis to provide a natural language interface for radiologists. While it is highly generalizable and works with multi-modal data, it is currently&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.02604v2-abstract-full').style.display = 'inline'; document.getElementById('2407.02604v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.02604v2-abstract-full" style="display: none;"> Large vision language models (VLMs) have progressed incredibly from research to applicability for general-purpose use cases. LLaVA-Med, a pioneering large language and vision assistant for biomedicine, can perform multi-modal biomedical image and data analysis to provide a natural language interface for radiologists. While it is highly generalizable and works with multi-modal data, it is currently limited by well-known challenges that exist in the large language model space. Hallucinations and imprecision in responses can lead to misdiagnosis which currently hinder the clinical adaptability of VLMs. To create precise, user-friendly models in healthcare, we propose D-Rax -- a domain-specific, conversational, radiologic assistance tool that can be used to gain insights about a particular radiologic image. In this study, we enhance the conversational analysis of chest X-ray (CXR) images to support radiological reporting, offering comprehensive insights from medical imaging and aiding in the formulation of accurate diagnosis. D-Rax is achieved by fine-tuning the LLaVA-Med architecture on our curated enhanced instruction-following data, comprising of images, instructions, as well as disease diagnosis and demographic predictions derived from MIMIC-CXR imaging data, CXR-related visual question answer (VQA) pairs, and predictive outcomes from multiple expert AI models. We observe statistically significant improvement in responses when evaluated for both open and close-ended conversations. Leveraging the power of state-of-the-art diagnostic models combined with VLMs, D-Rax empowers clinicians to interact with medical images using natural language, which could potentially streamline their decision-making process, enhance diagnostic accuracy, and conserve their time. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.02604v2-abstract-full').style.display = 'none'; document.getElementById('2407.02604v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted to the MICCAI 2024 Second International Workshop on Foundation Models for General Medical AI</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.14900">arXiv:2405.14900</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.14900">pdf</a>, <a href="https://arxiv.org/format/2405.14900">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.media.2024.103206.">10.1016/j.media.2024.103206. <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fair Evaluation of Federated Learning Algorithms for Automated Breast Density Classification: The Results of the 2022 ACR-NCI-NVIDIA Federated Learning Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Schmidt%2C+K">Kendall Schmidt</a>, <a href="/search/eess?searchtype=author&amp;query=Bearce%2C+B">Benjamin Bearce</a>, <a href="/search/eess?searchtype=author&amp;query=Chang%2C+K">Ken Chang</a>, <a href="/search/eess?searchtype=author&amp;query=Coombs%2C+L">Laura Coombs</a>, <a href="/search/eess?searchtype=author&amp;query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/eess?searchtype=author&amp;query=Elbatele%2C+M">Marawan Elbatele</a>, <a href="/search/eess?searchtype=author&amp;query=Mouhebe%2C+K">Kaouther Mouhebe</a>, <a href="/search/eess?searchtype=author&amp;query=Marti%2C+R">Robert Marti</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+R">Ruipeng Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Y">Yao Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+Y">Yanfeng Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Hu%2C+Y">Yaojun Hu</a>, <a href="/search/eess?searchtype=author&amp;query=Ying%2C+H">Haochao Ying</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+Y">Yuyang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Testagrose%2C+C">Conrad Testagrose</a>, <a href="/search/eess?searchtype=author&amp;query=Demirer%2C+M">Mutlu Demirer</a>, <a href="/search/eess?searchtype=author&amp;query=Gupta%2C+V">Vikash Gupta</a>, <a href="/search/eess?searchtype=author&amp;query=Ak%C3%BCnal%2C+%C3%9C">脺nal Ak眉nal</a>, <a href="/search/eess?searchtype=author&amp;query=Bujotzek%2C+M">Markus Bujotzek</a>, <a href="/search/eess?searchtype=author&amp;query=Maier-Hein%2C+K+H">Klaus H. Maier-Hein</a>, <a href="/search/eess?searchtype=author&amp;query=Qin%2C+Y">Yi Qin</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+X">Xiaomeng Li</a>, <a href="/search/eess?searchtype=author&amp;query=Kalpathy-Cramer%2C+J">Jayashree Kalpathy-Cramer</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.14900v1-abstract-short" style="display: inline;"> The correct interpretation of breast density is important in the assessment of breast cancer risk. AI has been shown capable of accurately predicting breast density, however, due to the differences in imaging characteristics across mammography systems, models built using data from one system do not generalize well to other systems. Though federated learning (FL) has emerged as a way to improve the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.14900v1-abstract-full').style.display = 'inline'; document.getElementById('2405.14900v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.14900v1-abstract-full" style="display: none;"> The correct interpretation of breast density is important in the assessment of breast cancer risk. AI has been shown capable of accurately predicting breast density, however, due to the differences in imaging characteristics across mammography systems, models built using data from one system do not generalize well to other systems. Though federated learning (FL) has emerged as a way to improve the generalizability of AI without the need to share data, the best way to preserve features from all training data during FL is an active area of research. To explore FL methodology, the breast density classification FL challenge was hosted in partnership with the American College of Radiology, Harvard Medical School&#39;s Mass General Brigham, University of Colorado, NVIDIA, and the National Institutes of Health National Cancer Institute. Challenge participants were able to submit docker containers capable of implementing FL on three simulated medical facilities, each containing a unique large mammography dataset. The breast density FL challenge ran from June 15 to September 5, 2022, attracting seven finalists from around the world. The winning FL submission reached a linear kappa score of 0.653 on the challenge test data and 0.413 on an external testing dataset, scoring comparably to a model trained on the same data in a central location. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.14900v1-abstract-full').style.display = 'none'; document.getElementById('2405.14900v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 9 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Medical Image Analysis Volume 95, July 2024, 103206 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.10655">arXiv:2305.10655</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.10655">pdf</a>, <a href="https://arxiv.org/format/2305.10655">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-031-17027-0_2">10.1007/978-3-031-17027-0_2 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> DeepEdit: Deep Editable Learning for Interactive Segmentation of 3D Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Diaz-Pinto%2C+A">Andres Diaz-Pinto</a>, <a href="/search/eess?searchtype=author&amp;query=Mehta%2C+P">Pritesh Mehta</a>, <a href="/search/eess?searchtype=author&amp;query=Alle%2C+S">Sachidanand Alle</a>, <a href="/search/eess?searchtype=author&amp;query=Asad%2C+M">Muhammad Asad</a>, <a href="/search/eess?searchtype=author&amp;query=Brown%2C+R">Richard Brown</a>, <a href="/search/eess?searchtype=author&amp;query=Nath%2C+V">Vishwesh Nath</a>, <a href="/search/eess?searchtype=author&amp;query=Ihsani%2C+A">Alvin Ihsani</a>, <a href="/search/eess?searchtype=author&amp;query=Antonelli%2C+M">Michela Antonelli</a>, <a href="/search/eess?searchtype=author&amp;query=Palkovics%2C+D">Daniel Palkovics</a>, <a href="/search/eess?searchtype=author&amp;query=Pinter%2C+C">Csaba Pinter</a>, <a href="/search/eess?searchtype=author&amp;query=Alkalay%2C+R">Ron Alkalay</a>, <a href="/search/eess?searchtype=author&amp;query=Pieper%2C+S">Steve Pieper</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Dogra%2C+P">Prerna Dogra</a>, <a href="/search/eess?searchtype=author&amp;query=Vercauteren%2C+T">Tom Vercauteren</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+A">Andrew Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Quraini%2C+A">Abood Quraini</a>, <a href="/search/eess?searchtype=author&amp;query=Ourselin%2C+S">Sebastien Ourselin</a>, <a href="/search/eess?searchtype=author&amp;query=Cardoso%2C+M+J">M. Jorge Cardoso</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.10655v1-abstract-short" style="display: inline;"> Automatic segmentation of medical images is a key step for diagnostic and interventional tasks. However, achieving this requires large amounts of annotated volumes, which can be tedious and time-consuming task for expert annotators. In this paper, we introduce DeepEdit, a deep learning-based method for volumetric medical image annotation, that allows automatic and semi-automatic segmentation, and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.10655v1-abstract-full').style.display = 'inline'; document.getElementById('2305.10655v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.10655v1-abstract-full" style="display: none;"> Automatic segmentation of medical images is a key step for diagnostic and interventional tasks. However, achieving this requires large amounts of annotated volumes, which can be tedious and time-consuming task for expert annotators. In this paper, we introduce DeepEdit, a deep learning-based method for volumetric medical image annotation, that allows automatic and semi-automatic segmentation, and click-based refinement. DeepEdit combines the power of two methods: a non-interactive (i.e. automatic segmentation using nnU-Net, UNET or UNETR) and an interactive segmentation method (i.e. DeepGrow), into a single deep learning model. It allows easy integration of uncertainty-based ranking strategies (i.e. aleatoric and epistemic uncertainty computation) and active learning. We propose and implement a method for training DeepEdit by using standard training combined with user interaction simulation. Once trained, DeepEdit allows clinicians to quickly segment their datasets by using the algorithm in auto segmentation mode or by providing clicks via a user interface (i.e. 3D Slicer, OHIF). We show the value of DeepEdit through evaluation on the PROSTATEx dataset for prostate/prostatic lesions and the Multi-Atlas Labeling Beyond the Cranial Vault (BTCV) dataset for abdominal CT segmentation, using state-of-the-art network architectures as baseline for comparison. DeepEdit could reduce the time and effort annotating 3D medical images compared to DeepGrow alone. Source code is available at https://github.com/Project-MONAI/MONAILabel <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.10655v1-abstract-full').style.display = 'none'; document.getElementById('2305.10655v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.12362">arXiv:2203.12362</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.12362">pdf</a>, <a href="https://arxiv.org/format/2203.12362">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.media.2024.103207">10.1016/j.media.2024.103207 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> MONAI Label: A framework for AI-assisted Interactive Labeling of 3D Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Diaz-Pinto%2C+A">Andres Diaz-Pinto</a>, <a href="/search/eess?searchtype=author&amp;query=Alle%2C+S">Sachidanand Alle</a>, <a href="/search/eess?searchtype=author&amp;query=Nath%2C+V">Vishwesh Nath</a>, <a href="/search/eess?searchtype=author&amp;query=Tang%2C+Y">Yucheng Tang</a>, <a href="/search/eess?searchtype=author&amp;query=Ihsani%2C+A">Alvin Ihsani</a>, <a href="/search/eess?searchtype=author&amp;query=Asad%2C+M">Muhammad Asad</a>, <a href="/search/eess?searchtype=author&amp;query=P%C3%A9rez-Garc%C3%ADa%2C+F">Fernando P茅rez-Garc铆a</a>, <a href="/search/eess?searchtype=author&amp;query=Mehta%2C+P">Pritesh Mehta</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+W">Wenqi Li</a>, <a href="/search/eess?searchtype=author&amp;query=Flores%2C+M">Mona Flores</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Vercauteren%2C+T">Tom Vercauteren</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Dogra%2C+P">Prerna Dogra</a>, <a href="/search/eess?searchtype=author&amp;query=Ourselin%2C+S">Sebastien Ourselin</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+A">Andrew Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Cardoso%2C+M+J">M. Jorge Cardoso</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.12362v2-abstract-short" style="display: inline;"> The lack of annotated datasets is a major bottleneck for training new task-specific supervised machine learning models, considering that manual annotation is extremely expensive and time-consuming. To address this problem, we present MONAI Label, a free and open-source framework that facilitates the development of applications based on artificial intelligence (AI) models that aim at reducing the t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.12362v2-abstract-full').style.display = 'inline'; document.getElementById('2203.12362v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.12362v2-abstract-full" style="display: none;"> The lack of annotated datasets is a major bottleneck for training new task-specific supervised machine learning models, considering that manual annotation is extremely expensive and time-consuming. To address this problem, we present MONAI Label, a free and open-source framework that facilitates the development of applications based on artificial intelligence (AI) models that aim at reducing the time required to annotate radiology datasets. Through MONAI Label, researchers can develop AI annotation applications focusing on their domain of expertise. It allows researchers to readily deploy their apps as services, which can be made available to clinicians via their preferred user interface. Currently, MONAI Label readily supports locally installed (3D Slicer) and web-based (OHIF) frontends and offers two active learning strategies to facilitate and speed up the training of segmentation algorithms. MONAI Label allows researchers to make incremental improvements to their AI-based annotation application by making them available to other researchers and clinicians alike. Additionally, MONAI Label provides sample AI-based interactive and non-interactive labeling applications, that can be used directly off the shelf, as plug-and-play to any given dataset. Significant reduced annotation times using the interactive model can be observed on two public datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.12362v2-abstract-full').style.display = 'none'; document.getElementById('2203.12362v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.06338">arXiv:2203.06338</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.06338">pdf</a>, <a href="https://arxiv.org/format/2203.06338">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Auto-FedRL: Federated Hyperparameter Optimization for Multi-institutional Medical Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Guo%2C+P">Pengfei Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+D">Dong Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Hatamizadeh%2C+A">Ali Hatamizadeh</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+A">An Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+Z">Ziyue Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+W">Wenqi Li</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+C">Can Zhao</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Harmon%2C+S">Stephanie Harmon</a>, <a href="/search/eess?searchtype=author&amp;query=Turkbey%2C+E">Evrim Turkbey</a>, <a href="/search/eess?searchtype=author&amp;query=Turkbey%2C+B">Baris Turkbey</a>, <a href="/search/eess?searchtype=author&amp;query=Wood%2C+B">Bradford Wood</a>, <a href="/search/eess?searchtype=author&amp;query=Patella%2C+F">Francesca Patella</a>, <a href="/search/eess?searchtype=author&amp;query=Stellato%2C+E">Elvira Stellato</a>, <a href="/search/eess?searchtype=author&amp;query=Carrafiello%2C+G">Gianpaolo Carrafiello</a>, <a href="/search/eess?searchtype=author&amp;query=Patel%2C+V+M">Vishal M. Patel</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.06338v2-abstract-short" style="display: inline;"> Federated learning (FL) is a distributed machine learning technique that enables collaborative model training while avoiding explicit data sharing. The inherent privacy-preserving property of FL algorithms makes them especially attractive to the medical field. However, in case of heterogeneous client data distributions, standard FL methods are unstable and require intensive hyperparameter tuning t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.06338v2-abstract-full').style.display = 'inline'; document.getElementById('2203.06338v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.06338v2-abstract-full" style="display: none;"> Federated learning (FL) is a distributed machine learning technique that enables collaborative model training while avoiding explicit data sharing. The inherent privacy-preserving property of FL algorithms makes them especially attractive to the medical field. However, in case of heterogeneous client data distributions, standard FL methods are unstable and require intensive hyperparameter tuning to achieve optimal performance. Conventional hyperparameter optimization algorithms are impractical in real-world FL applications as they involve numerous training trials, which are often not affordable with limited compute budgets. In this work, we propose an efficient reinforcement learning (RL)-based federated hyperparameter optimization algorithm, termed Auto-FedRL, in which an online RL agent can dynamically adjust hyperparameters of each client based on the current training progress. Extensive experiments are conducted to investigate different search strategies and RL agents. The effectiveness of the proposed method is validated on a heterogeneous data split of the CIFAR-10 dataset as well as two real-world medical image segmentation datasets for COVID-19 lesion segmentation in chest CT and pancreas segmentation in abdominal CT. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.06338v2-abstract-full').style.display = 'none'; document.getElementById('2203.06338v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.07535">arXiv:2111.07535</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2111.07535">pdf</a>, <a href="https://arxiv.org/format/2111.07535">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> T-AutoML: Automated Machine Learning for Lesion Segmentation using Transformers in 3D Medical Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Yang%2C+D">Dong Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+X">Xiaosong Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+Z">Ziyue Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.07535v1-abstract-short" style="display: inline;"> Lesion segmentation in medical imaging has been an important topic in clinical research. Researchers have proposed various detection and segmentation algorithms to address this task. Recently, deep learning-based approaches have significantly improved the performance over conventional methods. However, most state-of-the-art deep learning methods require the manual design of multiple network compon&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.07535v1-abstract-full').style.display = 'inline'; document.getElementById('2111.07535v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.07535v1-abstract-full" style="display: none;"> Lesion segmentation in medical imaging has been an important topic in clinical research. Researchers have proposed various detection and segmentation algorithms to address this task. Recently, deep learning-based approaches have significantly improved the performance over conventional methods. However, most state-of-the-art deep learning methods require the manual design of multiple network components and training strategies. In this paper, we propose a new automated machine learning algorithm, T-AutoML, which not only searches for the best neural architecture, but also finds the best combination of hyper-parameters and data augmentation strategies simultaneously. The proposed method utilizes the modern transformer model, which is introduced to adapt to the dynamic length of the search space embedding and can significantly improve the ability of the search. We validate T-AutoML on several large-scale public lesion segmentation data-sets and achieve state-of-the-art performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.07535v1-abstract-full').style.display = 'none'; document.getElementById('2111.07535v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at ICCV 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.08111">arXiv:2107.08111</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.08111">pdf</a>, <a href="https://arxiv.org/format/2107.08111">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Federated Whole Prostate Segmentation in MRI with Personalized Neural Architectures </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+D">Dong Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+W">Wenqi Li</a>, <a href="/search/eess?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/eess?searchtype=author&amp;query=Zhu%2C+W">Wentao Zhu</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+Z">Ziyue Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+X">Xiaosong Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.08111v1-abstract-short" style="display: inline;"> Building robust deep learning-based models requires diverse training data, ideally from several sources. However, these datasets cannot be combined easily because of patient privacy concerns or regulatory hurdles, especially if medical data is involved. Federated learning (FL) is a way to train machine learning models without the need for centralized datasets. Each FL client trains on their local&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.08111v1-abstract-full').style.display = 'inline'; document.getElementById('2107.08111v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.08111v1-abstract-full" style="display: none;"> Building robust deep learning-based models requires diverse training data, ideally from several sources. However, these datasets cannot be combined easily because of patient privacy concerns or regulatory hurdles, especially if medical data is involved. Federated learning (FL) is a way to train machine learning models without the need for centralized datasets. Each FL client trains on their local data while only sharing model parameters with a global server that aggregates the parameters from all clients. At the same time, each client&#39;s data can exhibit differences and inconsistencies due to the local variation in the patient population, imaging equipment, and acquisition protocols. Hence, the federated learned models should be able to adapt to the local particularities of a client&#39;s data. In this work, we combine FL with an AutoML technique based on local neural architecture search by training a &#34;supernet&#34;. Furthermore, we propose an adaptation scheme to allow for personalized model architectures at each FL client&#39;s site. The proposed method is evaluated on four different datasets from 3D prostate MRI and shown to improve the local models&#39; performance after adaptation through selecting an optimal path through the AutoML supernet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.08111v1-abstract-full').style.display = 'none'; document.getElementById('2107.08111v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">MICCAI 2021 accepted</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.11750">arXiv:2011.11750</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2011.11750">pdf</a>, <a href="https://arxiv.org/format/2011.11750">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Federated Semi-Supervised Learning for COVID Region Segmentation in Chest CT using Multi-National Data from China, Italy, Japan </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Yang%2C+D">Dong Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+Z">Ziyue Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+W">Wenqi Li</a>, <a href="/search/eess?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Harmon%2C+S">Stephanie Harmon</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+S">Sheng Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Turkbey%2C+B">Baris Turkbey</a>, <a href="/search/eess?searchtype=author&amp;query=Turkbey%2C+E">Evrim Turkbey</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+X">Xiaosong Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Zhu%2C+W">Wentao Zhu</a>, <a href="/search/eess?searchtype=author&amp;query=Carrafiello%2C+G">Gianpaolo Carrafiello</a>, <a href="/search/eess?searchtype=author&amp;query=Patella%2C+F">Francesca Patella</a>, <a href="/search/eess?searchtype=author&amp;query=Cariati%2C+M">Maurizio Cariati</a>, <a href="/search/eess?searchtype=author&amp;query=Obinata%2C+H">Hirofumi Obinata</a>, <a href="/search/eess?searchtype=author&amp;query=Mori%2C+H">Hitoshi Mori</a>, <a href="/search/eess?searchtype=author&amp;query=Tamura%2C+K">Kaku Tamura</a>, <a href="/search/eess?searchtype=author&amp;query=An%2C+P">Peng An</a>, <a href="/search/eess?searchtype=author&amp;query=Wood%2C+B+J">Bradford J. Wood</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.11750v1-abstract-short" style="display: inline;"> The recent outbreak of COVID-19 has led to urgent needs for reliable diagnosis and management of SARS-CoV-2 infection. As a complimentary tool, chest CT has been shown to be able to reveal visual patterns characteristic for COVID-19, which has definite value at several stages during the disease course. To facilitate CT analysis, recent efforts have focused on computer-aided characterization and di&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.11750v1-abstract-full').style.display = 'inline'; document.getElementById('2011.11750v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.11750v1-abstract-full" style="display: none;"> The recent outbreak of COVID-19 has led to urgent needs for reliable diagnosis and management of SARS-CoV-2 infection. As a complimentary tool, chest CT has been shown to be able to reveal visual patterns characteristic for COVID-19, which has definite value at several stages during the disease course. To facilitate CT analysis, recent efforts have focused on computer-aided characterization and diagnosis, which has shown promising results. However, domain shift of data across clinical data centers poses a serious challenge when deploying learning-based models. In this work, we attempt to find a solution for this challenge via federated and semi-supervised learning. A multi-national database consisting of 1704 scans from three countries is adopted to study the performance gap, when training a model with one dataset and applying it to another. Expert radiologists manually delineated 945 scans for COVID-19 findings. In handling the variability in both the data and annotations, a novel federated semi-supervised learning technique is proposed to fully utilize all available data (with or without annotations). Federated learning avoids the need for sensitive data-sharing, which makes it favorable for institutions and nations with strict regulatory policy on data privacy. Moreover, semi-supervision potentially reduces the annotation burden under a distributed setting. The proposed framework is shown to be effective compared to fully supervised scenarios with conventional data sharing instead of model weight sharing. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.11750v1-abstract-full').style.display = 'none'; document.getElementById('2011.11750v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted with minor revision to Medical Image Analysis</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.13148">arXiv:2009.13148</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.13148">pdf</a>, <a href="https://arxiv.org/format/2009.13148">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Automated Pancreas Segmentation Using Multi-institutional Collaborative Deep Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Wang%2C+P">Pochuan Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Shen%2C+C">Chen Shen</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+D">Dong Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Oda%2C+M">Masahiro Oda</a>, <a href="/search/eess?searchtype=author&amp;query=Misawa%2C+K">Kazunari Misawa</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+P">Po-Ting Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+K">Kao-Lang Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Liao%2C+W">Wei-Chih Liao</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+W">Weichung Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Mori%2C+K">Kensaku Mori</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.13148v1-abstract-short" style="display: inline;"> The performance of deep learning-based methods strongly relies on the number of datasets used for training. Many efforts have been made to increase the data in the medical image analysis field. However, unlike photography images, it is hard to generate centralized databases to collect medical images because of numerous technical, legal, and privacy issues. In this work, we study the use of federat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.13148v1-abstract-full').style.display = 'inline'; document.getElementById('2009.13148v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.13148v1-abstract-full" style="display: none;"> The performance of deep learning-based methods strongly relies on the number of datasets used for training. Many efforts have been made to increase the data in the medical image analysis field. However, unlike photography images, it is hard to generate centralized databases to collect medical images because of numerous technical, legal, and privacy issues. In this work, we study the use of federated learning between two institutions in a real-world setting to collaboratively train a model without sharing the raw data across national boundaries. We quantitatively compare the segmentation models obtained with federated learning and local training alone. Our experimental results show that federated learning models have higher generalizability than standalone training. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.13148v1-abstract-full').style.display = 'none'; document.getElementById('2009.13148v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by MICCAI DCL Workshop 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.01871">arXiv:2009.01871</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.01871">pdf</a>, <a href="https://arxiv.org/format/2009.01871">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-60548-3_18">10.1007/978-3-030-60548-3_18 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Federated Learning for Breast Density Classification: A Real-World Implementation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Chang%2C+K">Ken Chang</a>, <a href="/search/eess?searchtype=author&amp;query=Singh%2C+P">Praveer Singh</a>, <a href="/search/eess?searchtype=author&amp;query=Neumark%2C+N">Nir Neumark</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+W">Wenqi Li</a>, <a href="/search/eess?searchtype=author&amp;query=Gupta%2C+V">Vikash Gupta</a>, <a href="/search/eess?searchtype=author&amp;query=Gupta%2C+S">Sharut Gupta</a>, <a href="/search/eess?searchtype=author&amp;query=Qu%2C+L">Liangqiong Qu</a>, <a href="/search/eess?searchtype=author&amp;query=Ihsani%2C+A">Alvin Ihsani</a>, <a href="/search/eess?searchtype=author&amp;query=Bizzo%2C+B+C">Bernardo C. Bizzo</a>, <a href="/search/eess?searchtype=author&amp;query=Wen%2C+Y">Yuhong Wen</a>, <a href="/search/eess?searchtype=author&amp;query=Buch%2C+V">Varun Buch</a>, <a href="/search/eess?searchtype=author&amp;query=Shah%2C+M">Meesam Shah</a>, <a href="/search/eess?searchtype=author&amp;query=Kitamura%2C+F">Felipe Kitamura</a>, <a href="/search/eess?searchtype=author&amp;query=Mendon%C3%A7a%2C+M">Matheus Mendon莽a</a>, <a href="/search/eess?searchtype=author&amp;query=Lavor%2C+V">Vitor Lavor</a>, <a href="/search/eess?searchtype=author&amp;query=Harouni%2C+A">Ahmed Harouni</a>, <a href="/search/eess?searchtype=author&amp;query=Compas%2C+C">Colin Compas</a>, <a href="/search/eess?searchtype=author&amp;query=Tetreault%2C+J">Jesse Tetreault</a>, <a href="/search/eess?searchtype=author&amp;query=Dogra%2C+P">Prerna Dogra</a>, <a href="/search/eess?searchtype=author&amp;query=Cheng%2C+Y">Yan Cheng</a>, <a href="/search/eess?searchtype=author&amp;query=Erdal%2C+S">Selnur Erdal</a>, <a href="/search/eess?searchtype=author&amp;query=White%2C+R">Richard White</a>, <a href="/search/eess?searchtype=author&amp;query=Hashemian%2C+B">Behrooz Hashemian</a>, <a href="/search/eess?searchtype=author&amp;query=Schultz%2C+T">Thomas Schultz</a> , et al. (18 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.01871v3-abstract-short" style="display: inline;"> Building robust deep learning-based models requires large quantities of diverse training data. In this study, we investigate the use of federated learning (FL) to build medical imaging classification models in a real-world collaborative setting. Seven clinical institutions from across the world joined this FL effort to train a model for breast density classification based on Breast Imaging, Report&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.01871v3-abstract-full').style.display = 'inline'; document.getElementById('2009.01871v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.01871v3-abstract-full" style="display: none;"> Building robust deep learning-based models requires large quantities of diverse training data. In this study, we investigate the use of federated learning (FL) to build medical imaging classification models in a real-world collaborative setting. Seven clinical institutions from across the world joined this FL effort to train a model for breast density classification based on Breast Imaging, Reporting &amp; Data System (BI-RADS). We show that despite substantial differences among the datasets from all sites (mammography system, class distribution, and data set size) and without centralizing data, we can successfully train AI models in federation. The results show that models trained using FL perform 6.3% on average better than their counterparts trained on an institute&#39;s local data alone. Furthermore, we show a 45.8% relative improvement in the models&#39; generalizability when evaluated on the other participating sites&#39; testing data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.01871v3-abstract-full').style.display = 'none'; document.getElementById('2009.01871v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at the 1st MICCAI Workshop on &#34;Distributed And Collaborative Learning&#34;; add citation to Fig. 1 &amp; 2 and update Fig. 5; fix typo in affiliations</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> In: Albarqouni S. et al. (eds) Domain Adaptation and Representation Transfer, and Distributed and Collaborative Learning. DART 2020, DCL 2020. Lecture Notes in Computer Science, vol 12444. Springer, Cham </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1908.01543">arXiv:1908.01543</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1908.01543">pdf</a>, <a href="https://arxiv.org/format/1908.01543">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.compmedimag.2019.101642">10.1016/j.compmedimag.2019.101642 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Precise Estimation of Renal Vascular Dominant Regions Using Spatially Aware Fully Convolutional Networks, Tensor-Cut and Voronoi Diagrams </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Wang%2C+C">Chenglong Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Roth%2C+H+R">Holger R. Roth</a>, <a href="/search/eess?searchtype=author&amp;query=Kitasaka%2C+T">Takayuki Kitasaka</a>, <a href="/search/eess?searchtype=author&amp;query=Oda%2C+M">Masahiro Oda</a>, <a href="/search/eess?searchtype=author&amp;query=Hayashi%2C+Y">Yuichiro Hayashi</a>, <a href="/search/eess?searchtype=author&amp;query=Yoshino%2C+Y">Yasushi Yoshino</a>, <a href="/search/eess?searchtype=author&amp;query=Yamamoto%2C+T">Tokunori Yamamoto</a>, <a href="/search/eess?searchtype=author&amp;query=Sassa%2C+N">Naoto Sassa</a>, <a href="/search/eess?searchtype=author&amp;query=Goto%2C+M">Momokazu Goto</a>, <a href="/search/eess?searchtype=author&amp;query=Mori%2C+K">Kensaku Mori</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1908.01543v1-abstract-short" style="display: inline;"> This paper presents a new approach for precisely estimating the renal vascular dominant region using a Voronoi diagram. To provide computer-assisted diagnostics for the pre-surgical simulation of partial nephrectomy surgery, we must obtain information on the renal arteries and the renal vascular dominant regions. We propose a fully automatic segmentation method that combines a neural network and t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.01543v1-abstract-full').style.display = 'inline'; document.getElementById('1908.01543v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1908.01543v1-abstract-full" style="display: none;"> This paper presents a new approach for precisely estimating the renal vascular dominant region using a Voronoi diagram. To provide computer-assisted diagnostics for the pre-surgical simulation of partial nephrectomy surgery, we must obtain information on the renal arteries and the renal vascular dominant regions. We propose a fully automatic segmentation method that combines a neural network and tensor-based graph-cut methods to precisely extract the kidney and renal arteries. First, we use a convolutional neural network to localize the kidney regions and extract tiny renal arteries with a tensor-based graph-cut method. Then we generate a Voronoi diagram to estimate the renal vascular dominant regions based on the segmented kidney and renal arteries. The accuracy of kidney segmentation in 27 cases with 8-fold cross validation reached a Dice score of 95%. The accuracy of renal artery segmentation in 8 cases obtained a centerline overlap ratio of 80%. Each partition region corresponds to a renal vascular dominant region. The final dominant-region estimation accuracy achieved a Dice coefficient of 80%. A clinical application showed the potential of our proposed estimation approach in a real clinical surgical environment. Further validation using large-scale database is our future work. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.01543v1-abstract-full').style.display = 'none'; document.getElementById('1908.01543v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Computerized Medical Imaging and Graphics 77 (2019): 101642 </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10