<!-- Scrape artifact from the mirror (hostname and duplicated page title), preserved as comments so the document starts with the doctype: -->
<!-- CINXE.COM -->
<!-- Search | arXiv e-print repository -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–34 of 34 results for author: <span class="mathjax">Kather, J N</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Kather%2C+J+N">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Kather, J N"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Kather%2C+J+N&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Kather, J N"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16803">arXiv:2411.16803</a> <span> [<a href="https://arxiv.org/pdf/2411.16803">pdf</a>, <a href="https://arxiv.org/format/2411.16803">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Abnormality-Driven Representation Learning for Radiology Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ligero%2C+M">Marta Ligero</a>, <a href="/search/cs?searchtype=author&query=Lenz%2C+T">Tim Lenz</a>, <a href="/search/cs?searchtype=author&query=W%C3%B6lflein%2C+G">Georg Wölflein</a>, <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. 
El Nahhas</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16803v1-abstract-short" style="display: inline;"> To date, the most common approach for radiology deep learning pipelines is the use of end-to-end 3D networks based on models pre-trained on other tasks, followed by fine-tuning on the task at hand. In contrast, adjacent medical fields such as pathology, which focus on 2D images, have effectively adopted task-agnostic foundational models based on self-supervised learning (SSL), combined with weakly… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16803v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16803v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16803v1-abstract-full" style="display: none;"> To date, the most common approach for radiology deep learning pipelines is the use of end-to-end 3D networks based on models pre-trained on other tasks, followed by fine-tuning on the task at hand. In contrast, adjacent medical fields such as pathology, which focus on 2D images, have effectively adopted task-agnostic foundational models based on self-supervised learning (SSL), combined with weakly-supervised deep learning (DL). However, the field of radiology still lacks task-agnostic representation models due to the computational and data demands of 3D imaging and the anatomical complexity inherent to radiology scans. To address this gap, we propose CLEAR, a framework for radiology images that uses extracted embeddings from 2D slices along with attention-based aggregation for efficiently predicting clinical endpoints. 
As part of this framework, we introduce lesion-enhanced contrastive learning (LeCL), a novel approach to obtain visual representations driven by abnormalities in 2D axial slices across different locations of the CT scans. Specifically, we trained single-domain contrastive learning approaches using three different architectures: Vision Transformers, Vision State Space Models and Gated Convolutional Neural Networks. We evaluate our approach across three clinical tasks: tumor lesion location, lung disease detection, and patient staging, benchmarking against four state-of-the-art foundation models, including BiomedCLIP. Our findings demonstrate that CLEAR using representations learned through LeCL, outperforms existing foundation models, while being substantially more compute- and data-efficient. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16803v1-abstract-full').style.display = 'none'; document.getElementById('2411.16803v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15802">arXiv:2411.15802</a> <span> [<a href="https://arxiv.org/pdf/2411.15802">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Medical Slice Transformer: Improved Diagnosis and Explainability on 3D Medical Images with DINOv2 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=M%C3%BCller-Franzes%2C+G">Gustav Müller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Siepmann%2C+R">Robert Siepmann</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15802v1-abstract-short" style="display: inline;"> MRI and CT are essential clinical cross-sectional imaging techniques for diagnosing complex conditions. However, large 3D datasets with annotations for deep learning are scarce. While methods like DINOv2 are encouraging for 2D image analysis, these methods have not been applied to 3D medical images. 
Furthermore, deep learning models often lack explainability due to their "black-box" nature. This s… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15802v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15802v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15802v1-abstract-full" style="display: none;"> MRI and CT are essential clinical cross-sectional imaging techniques for diagnosing complex conditions. However, large 3D datasets with annotations for deep learning are scarce. While methods like DINOv2 are encouraging for 2D image analysis, these methods have not been applied to 3D medical images. Furthermore, deep learning models often lack explainability due to their "black-box" nature. This study aims to extend 2D self-supervised models, specifically DINOv2, to 3D medical imaging while evaluating their potential for explainable outcomes. We introduce the Medical Slice Transformer (MST) framework to adapt 2D self-supervised models for 3D medical image analysis. MST combines a Transformer architecture with a 2D feature extractor, i.e., DINOv2. We evaluate its diagnostic performance against a 3D convolutional neural network (3D ResNet) across three clinical datasets: breast MRI (651 patients), chest CT (722 patients), and knee MRI (1199 patients). Both methods were tested for diagnosing breast cancer, predicting lung nodule dignity, and detecting meniscus tears. Diagnostic performance was assessed by calculating the Area Under the Receiver Operating Characteristic Curve (AUC). Explainability was evaluated through a radiologist's qualitative comparison of saliency maps based on slice and lesion correctness. P-values were calculated using Delong's test. MST achieved higher AUC values compared to ResNet across all three datasets: breast (0.94$\pm$0.01 vs. 0.91$\pm$0.02, P=0.02), chest (0.95$\pm$0.01 vs. 
0.92$\pm$0.02, P=0.13), and knee (0.85$\pm$0.04 vs. 0.69$\pm$0.05, P=0.001). Saliency maps were consistently more precise and anatomically correct for MST than for ResNet. Self-supervised 2D models like DINOv2 can be effectively adapted for 3D medical imaging using MST, offering enhanced diagnostic accuracy and explainability compared to convolutional neural networks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15802v1-abstract-full').style.display = 'none'; document.getElementById('2411.15802v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13623">arXiv:2411.13623</a> <span> [<a href="https://arxiv.org/pdf/2411.13623">pdf</a>, <a href="https://arxiv.org/format/2411.13623">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Unsupervised Foundation Model-Agnostic Slide-Level Representation Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lenz%2C+T">Tim Lenz</a>, <a href="/search/cs?searchtype=author&query=Neidlinger%2C+P">Peter Neidlinger</a>, <a href="/search/cs?searchtype=author&query=Ligero%2C+M">Marta Ligero</a>, <a href="/search/cs?searchtype=author&query=W%C3%B6lflein%2C+G">Georg Wölflein</a>, <a href="/search/cs?searchtype=author&query=van+Treeck%2C+M">Marko van Treeck</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13623v2-abstract-short" style="display: inline;"> Representation learning of pathology whole-slide images(WSIs) has primarily relied on weak supervision with Multiple Instance Learning (MIL). This approach leads to slide representations highly tailored to a specific clinical task. Self-supervised learning (SSL) has been successfully applied to train histopathology foundation models (FMs) for patch embedding generation. However, generating patient… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13623v2-abstract-full').style.display = 'inline'; document.getElementById('2411.13623v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13623v2-abstract-full" style="display: none;"> Representation learning of pathology whole-slide images(WSIs) has primarily relied on weak supervision with Multiple Instance Learning (MIL). This approach leads to slide representations highly tailored to a specific clinical task. Self-supervised learning (SSL) has been successfully applied to train histopathology foundation models (FMs) for patch embedding generation. However, generating patient or slide level embeddings remains challenging. Existing approaches for slide representation learning extend the principles of SSL from patch level learning to entire slides by aligning different augmentations of the slide or by utilizing multimodal data. By integrating tile embeddings from multiple FMs, we propose a new single modality SSL method in feature space that generates useful slide representations. Our contrastive pretraining strategy, called COBRA, employs multiple FMs and an architecture based on Mamba-2. 
COBRA exceeds performance of state-of-the-art slide encoders on four different public Clinical Proteomic Tumor Analysis Consortium (CPTAC) cohorts on average by at least +4.5% AUC, despite only being pretrained on 3048 WSIs from The Cancer Genome Atlas (TCGA). Additionally, COBRA is readily compatible at inference time with previously unseen feature extractors. Code available at https://github.com/KatherLab/COBRA. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13623v2-abstract-full').style.display = 'none'; document.getElementById('2411.13623v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15012">arXiv:2410.15012</a> <span> [<a href="https://arxiv.org/pdf/2410.15012">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Pathologist-like explainable AI for interpretable Gleason grading in prostate cancer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mittmann%2C+G">Gesa Mittmann</a>, <a href="/search/cs?searchtype=author&query=Laiouar-Pedari%2C+S">Sara Laiouar-Pedari</a>, <a 
href="/search/cs?searchtype=author&query=Mehrtens%2C+H+A">Hendrik A. Mehrtens</a>, <a href="/search/cs?searchtype=author&query=Haggenm%C3%BCller%2C+S">Sarah Haggenmüller</a>, <a href="/search/cs?searchtype=author&query=Bucher%2C+T">Tabea-Clara Bucher</a>, <a href="/search/cs?searchtype=author&query=Chanda%2C+T">Tirtha Chanda</a>, <a href="/search/cs?searchtype=author&query=Gaisa%2C+N+T">Nadine T. Gaisa</a>, <a href="/search/cs?searchtype=author&query=Wagner%2C+M">Mathias Wagner</a>, <a href="/search/cs?searchtype=author&query=Klamminger%2C+G+G">Gilbert Georg Klamminger</a>, <a href="/search/cs?searchtype=author&query=Rau%2C+T+T">Tilman T. Rau</a>, <a href="/search/cs?searchtype=author&query=Neppl%2C+C">Christina Neppl</a>, <a href="/search/cs?searchtype=author&query=Comp%C3%A9rat%2C+E+M">Eva Maria Compérat</a>, <a href="/search/cs?searchtype=author&query=Gocht%2C+A">Andreas Gocht</a>, <a href="/search/cs?searchtype=author&query=H%C3%A4mmerle%2C+M">Monika Hämmerle</a>, <a href="/search/cs?searchtype=author&query=Rupp%2C+N+J">Niels J. Rupp</a>, <a href="/search/cs?searchtype=author&query=Westhoff%2C+J">Jula Westhoff</a>, <a href="/search/cs?searchtype=author&query=Kr%C3%BCcken%2C+I">Irene Krücken</a>, <a href="/search/cs?searchtype=author&query=Seidl%2C+M">Maximillian Seidl</a>, <a href="/search/cs?searchtype=author&query=Sch%C3%BCrch%2C+C+M">Christian M. Schürch</a>, <a href="/search/cs?searchtype=author&query=Bauer%2C+M">Marcus Bauer</a>, <a href="/search/cs?searchtype=author&query=Solass%2C+W">Wiebke Solass</a>, <a href="/search/cs?searchtype=author&query=Tam%2C+Y+C">Yu Chun Tam</a>, <a href="/search/cs?searchtype=author&query=Weber%2C+F">Florian Weber</a>, <a href="/search/cs?searchtype=author&query=Grobholz%2C+R">Rainer Grobholz</a>, <a href="/search/cs?searchtype=author&query=Augustyniak%2C+J">Jaroslaw Augustyniak</a> , et al. 
(41 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15012v1-abstract-short" style="display: inline;"> The aggressiveness of prostate cancer, the most common cancer in men worldwide, is primarily assessed based on histopathological data using the Gleason scoring system. While artificial intelligence (AI) has shown promise in accurately predicting Gleason scores, these predictions often lack inherent explainability, potentially leading to distrust in human-machine interactions. To address this issue… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15012v1-abstract-full').style.display = 'inline'; document.getElementById('2410.15012v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15012v1-abstract-full" style="display: none;"> The aggressiveness of prostate cancer, the most common cancer in men worldwide, is primarily assessed based on histopathological data using the Gleason scoring system. While artificial intelligence (AI) has shown promise in accurately predicting Gleason scores, these predictions often lack inherent explainability, potentially leading to distrust in human-machine interactions. To address this issue, we introduce a novel dataset of 1,015 tissue microarray core images, annotated by an international group of 54 pathologists. The annotations provide detailed localized pattern descriptions for Gleason grading in line with international guidelines. Utilizing this dataset, we develop an inherently explainable AI system based on a U-Net architecture that provides predictions leveraging pathologists' terminology. 
This approach circumvents post-hoc explainability methods while maintaining or exceeding the performance of methods trained directly for Gleason pattern segmentation (Dice score: 0.713 $\pm$ 0.003 trained on explanations vs. 0.691 $\pm$ 0.010 trained on Gleason patterns). By employing soft labels during training, we capture the intrinsic uncertainty in the data, yielding strong results in Gleason pattern segmentation even in the context of high interobserver variability. With the release of this dataset, we aim to encourage further research into segmentation in medical tasks with high levels of subjectivity and to advance the understanding of pathologists' reasoning processes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15012v1-abstract-full').style.display = 'none'; document.getElementById('2410.15012v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">58 pages, 15 figures (incl. 
supplementary)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13476">arXiv:2409.13476</a> <span> [<a href="https://arxiv.org/pdf/2409.13476">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Dermatologist-like explainable AI enhances melanoma diagnosis accuracy: eye-tracking study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chanda%2C+T">Tirtha Chanda</a>, <a href="/search/cs?searchtype=author&query=Haggenmueller%2C+S">Sarah Haggenmueller</a>, <a href="/search/cs?searchtype=author&query=Bucher%2C+T">Tabea-Clara Bucher</a>, <a href="/search/cs?searchtype=author&query=Holland-Letz%2C+T">Tim Holland-Letz</a>, <a href="/search/cs?searchtype=author&query=Kittler%2C+H">Harald Kittler</a>, <a href="/search/cs?searchtype=author&query=Tschandl%2C+P">Philipp Tschandl</a>, <a href="/search/cs?searchtype=author&query=Heppt%2C+M+V">Markus V. Heppt</a>, <a href="/search/cs?searchtype=author&query=Berking%2C+C">Carola Berking</a>, <a href="/search/cs?searchtype=author&query=Utikal%2C+J+S">Jochen S. 
Utikal</a>, <a href="/search/cs?searchtype=author&query=Schilling%2C+B">Bastian Schilling</a>, <a href="/search/cs?searchtype=author&query=Buerger%2C+C">Claudia Buerger</a>, <a href="/search/cs?searchtype=author&query=Navarrete-Dechent%2C+C">Cristian Navarrete-Dechent</a>, <a href="/search/cs?searchtype=author&query=Goebeler%2C+M">Matthias Goebeler</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Schneider%2C+C+V">Carolin V. Schneider</a>, <a href="/search/cs?searchtype=author&query=Durani%2C+B">Benjamin Durani</a>, <a href="/search/cs?searchtype=author&query=Durani%2C+H">Hendrike Durani</a>, <a href="/search/cs?searchtype=author&query=Jansen%2C+M">Martin Jansen</a>, <a href="/search/cs?searchtype=author&query=Wacker%2C+J">Juliane Wacker</a>, <a href="/search/cs?searchtype=author&query=Wacker%2C+J">Joerg Wacker</a>, <a href="/search/cs?searchtype=author&query=Consortium%2C+R+S">Reader Study Consortium</a>, <a href="/search/cs?searchtype=author&query=Brinker%2C+T+J">Titus J. Brinker</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13476v1-abstract-short" style="display: inline;"> Artificial intelligence (AI) systems have substantially improved dermatologists' diagnostic accuracy for melanoma, with explainable AI (XAI) systems further enhancing clinicians' confidence and trust in AI-driven decisions. Despite these advancements, there remains a critical need for objective evaluation of how dermatologists engage with both AI and XAI tools. 
In this study, 76 dermatologists par… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13476v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13476v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13476v1-abstract-full" style="display: none;"> Artificial intelligence (AI) systems have substantially improved dermatologists' diagnostic accuracy for melanoma, with explainable AI (XAI) systems further enhancing clinicians' confidence and trust in AI-driven decisions. Despite these advancements, there remains a critical need for objective evaluation of how dermatologists engage with both AI and XAI tools. In this study, 76 dermatologists participated in a reader study, diagnosing 16 dermoscopic images of melanomas and nevi using an XAI system that provides detailed, domain-specific explanations. Eye-tracking technology was employed to assess their interactions. Diagnostic performance was compared with that of a standard AI system lacking explanatory features. Our findings reveal that XAI systems improved balanced diagnostic accuracy by 2.8 percentage points relative to standard AI. Moreover, diagnostic disagreements with AI/XAI systems and complex lesions were associated with elevated cognitive load, as evidenced by increased ocular fixations. These insights have significant implications for clinical practice, the design of AI tools for visual tasks, and the broader development of XAI in medical diagnostics. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13476v1-abstract-full').style.display = 'none'; document.getElementById('2409.13476v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.00544">arXiv:2409.00544</a> <span> [<a href="https://arxiv.org/pdf/2409.00544">pdf</a>, <a href="https://arxiv.org/format/2409.00544">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Large Language Models-Enabled Digital Twins for Precision Medicine in Rare Gynecological Tumors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lammert%2C+J">Jacqueline Lammert</a>, <a href="/search/cs?searchtype=author&query=Pfarr%2C+N">Nicole Pfarr</a>, <a href="/search/cs?searchtype=author&query=Kuligin%2C+L">Leonid Kuligin</a>, <a href="/search/cs?searchtype=author&query=Mathes%2C+S">Sonja Mathes</a>, <a href="/search/cs?searchtype=author&query=Dreyer%2C+T">Tobias Dreyer</a>, <a href="/search/cs?searchtype=author&query=Modersohn%2C+L">Luise Modersohn</a>, <a href="/search/cs?searchtype=author&query=Metzger%2C+P">Patrick Metzger</a>, <a 
href="/search/cs?searchtype=author&query=Ferber%2C+D">Dyke Ferber</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Adams%2C+L+C">Lisa Christine Adams</a>, <a href="/search/cs?searchtype=author&query=Bressem%2C+K+K">Keno Kyrill Bressem</a>, <a href="/search/cs?searchtype=author&query=Lange%2C+S">Sebastian Lange</a>, <a href="/search/cs?searchtype=author&query=Schwamborn%2C+K">Kristina Schwamborn</a>, <a href="/search/cs?searchtype=author&query=Boeker%2C+M">Martin Boeker</a>, <a href="/search/cs?searchtype=author&query=Kiechle%2C+M">Marion Kiechle</a>, <a href="/search/cs?searchtype=author&query=Schatz%2C+U+A">Ulrich A. Schatz</a>, <a href="/search/cs?searchtype=author&query=Bronger%2C+H">Holger Bronger</a>, <a href="/search/cs?searchtype=author&query=Tschochohei%2C+M">Maximilian Tschochohei</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.00544v1-abstract-short" style="display: inline;"> Rare gynecological tumors (RGTs) present major clinical challenges due to their low incidence and heterogeneity. The lack of clear guidelines leads to suboptimal management and poor prognosis. Molecular tumor boards accelerate access to effective therapies by tailoring treatment based on biomarkers, beyond cancer type. 
Unstructured data that requires manual curation hinders efficient use of biomar… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.00544v1-abstract-full').style.display = 'inline'; document.getElementById('2409.00544v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.00544v1-abstract-full" style="display: none;"> Rare gynecological tumors (RGTs) present major clinical challenges due to their low incidence and heterogeneity. The lack of clear guidelines leads to suboptimal management and poor prognosis. Molecular tumor boards accelerate access to effective therapies by tailoring treatment based on biomarkers, beyond cancer type. Unstructured data that requires manual curation hinders efficient use of biomarker profiling for therapy matching. This study explores the use of large language models (LLMs) to construct digital twins for precision medicine in RGTs. Our proof-of-concept digital twin system integrates clinical and biomarker data from institutional and published cases (n=21) and literature-derived data (n=655 publications with n=404,265 patients) to create tailored treatment plans for metastatic uterine carcinosarcoma, identifying options potentially missed by traditional, single-source analysis. LLM-enabled digital twins efficiently model individual patient trajectories. Shifting to a biology-based rather than organ-based tumor definition enables personalized care that could advance RGT management and thus enhance patient outcomes. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.00544v1-abstract-full').style.display = 'none'; document.getElementById('2409.00544v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 2 figures, 3 tables, supplements, original article</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.15823">arXiv:2408.15823</a> <span> [<a href="https://arxiv.org/pdf/2408.15823">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Benchmarking foundation models as feature extractors for weakly-supervised computational pathology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Neidlinger%2C+P">Peter Neidlinger</a>, <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. 
El Nahhas</a>, <a href="/search/cs?searchtype=author&query=Muti%2C+H+S">Hannah Sophie Muti</a>, <a href="/search/cs?searchtype=author&query=Lenz%2C+T">Tim Lenz</a>, <a href="/search/cs?searchtype=author&query=Hoffmeister%2C+M">Michael Hoffmeister</a>, <a href="/search/cs?searchtype=author&query=Brenner%2C+H">Hermann Brenner</a>, <a href="/search/cs?searchtype=author&query=van+Treeck%2C+M">Marko van Treeck</a>, <a href="/search/cs?searchtype=author&query=Langer%2C+R">Rupert Langer</a>, <a href="/search/cs?searchtype=author&query=Dislich%2C+B">Bastian Dislich</a>, <a href="/search/cs?searchtype=author&query=Behrens%2C+H+M">Hans Michael Behrens</a>, <a href="/search/cs?searchtype=author&query=R%C3%B6cken%2C+C">Christoph Röcken</a>, <a href="/search/cs?searchtype=author&query=Foersch%2C+S">Sebastian Foersch</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Marra%2C+A">Antonio Marra</a>, <a href="/search/cs?searchtype=author&query=Saldanha%2C+O+L">Oliver Lester Saldanha</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.15823v2-abstract-short" style="display: inline;"> Advancements in artificial intelligence have driven the development of numerous pathology foundation models capable of extracting clinically relevant information. However, there is currently limited literature independently evaluating these foundation models on truly external cohorts and clinically-relevant tasks to uncover adjustments for future improvements. 
In this study, we benchmarked 19 hist… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.15823v2-abstract-full').style.display = 'inline'; document.getElementById('2408.15823v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.15823v2-abstract-full" style="display: none;"> Advancements in artificial intelligence have driven the development of numerous pathology foundation models capable of extracting clinically relevant information. However, there is currently limited literature independently evaluating these foundation models on truly external cohorts and clinically-relevant tasks to uncover adjustments for future improvements. In this study, we benchmarked 19 histopathology foundation models on 13 patient cohorts with 6,818 patients and 9,528 slides from lung, colorectal, gastric, and breast cancers. The models were evaluated on weakly-supervised tasks related to biomarkers, morphological properties, and prognostic outcomes. We show that a vision-language foundation model, CONCH, yielded the highest performance when compared to vision-only foundation models, with Virchow2 as close second. The experiments reveal that foundation models trained on distinct cohorts learn complementary features to predict the same label, and can be fused to outperform the current state of the art. An ensemble combining CONCH and Virchow2 predictions outperformed individual models in 55% of tasks, leveraging their complementary strengths in classification scenarios. Moreover, our findings suggest that data diversity outweighs data volume for foundation models. Our work highlights actionable adjustments to improve pathology foundation models. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.15823v2-abstract-full').style.display = 'none'; document.getElementById('2408.15823v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.18981">arXiv:2407.18981</a> <span> [<a href="https://arxiv.org/pdf/2407.18981">pdf</a>, <a href="https://arxiv.org/format/2407.18981">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Prompt Injection Attacks on Large Language Models in Oncology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Clusmann%2C+J">Jan Clusmann</a>, <a href="/search/cs?searchtype=author&query=Ferber%2C+D">Dyke Ferber</a>, <a href="/search/cs?searchtype=author&query=Wiest%2C+I+C">Isabella C. Wiest</a>, <a href="/search/cs?searchtype=author&query=Schneider%2C+C+V">Carolin V. Schneider</a>, <a href="/search/cs?searchtype=author&query=Brinker%2C+T+J">Titus J. Brinker</a>, <a href="/search/cs?searchtype=author&query=Foersch%2C+S">Sebastian Foersch</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob N. 
Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.18981v1-abstract-short" style="display: inline;"> Vision-language artificial intelligence models (VLMs) possess medical knowledge and can be employed in healthcare in numerous ways, including as image interpreters, virtual scribes, and general decision support systems. However, here, we demonstrate that current VLMs applied to medical tasks exhibit a fundamental security flaw: they can be attacked by prompt injection attacks, which can be used to… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.18981v1-abstract-full').style.display = 'inline'; document.getElementById('2407.18981v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.18981v1-abstract-full" style="display: none;"> Vision-language artificial intelligence models (VLMs) possess medical knowledge and can be employed in healthcare in numerous ways, including as image interpreters, virtual scribes, and general decision support systems. However, here, we demonstrate that current VLMs applied to medical tasks exhibit a fundamental security flaw: they can be attacked by prompt injection attacks, which can be used to output harmful information just by interacting with the VLM, without any access to its parameters. We performed a quantitative study to evaluate the vulnerabilities to these attacks in four state of the art VLMs which have been proposed to be of utility in healthcare: Claude 3 Opus, Claude 3.5 Sonnet, Reka Core, and GPT-4o. Using a set of N=297 attacks, we show that all of these models are susceptible. Specifically, we show that embedding sub-visual prompts in medical imaging data can cause the model to provide harmful output, and that these prompts are non-obvious to human observers. 
Thus, our study demonstrates a key vulnerability in medical VLMs which should be mitigated before widespread clinical adoption. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.18981v1-abstract-full').style.display = 'none'; document.getElementById('2407.18981v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">57 Pages, 5 Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.15621">arXiv:2407.15621</a> <span> [<a href="https://arxiv.org/pdf/2407.15621">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> RadioRAG: Factual large language models for enhanced diagnostics in radiology using online retrieval augmented generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Lotfinia%2C+M">Mahshad Lotfinia</a>, <a href="/search/cs?searchtype=author&query=Bressem%2C+K">Keno Bressem</a>, <a href="/search/cs?searchtype=author&query=Siepmann%2C+R">Robert Siepmann</a>, <a href="/search/cs?searchtype=author&query=Adams%2C+L">Lisa Adams</a>, <a 
href="/search/cs?searchtype=author&query=Ferber%2C+D">Dyke Ferber</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.15621v2-abstract-short" style="display: inline;"> Large language models (LLMs) often generate outdated or inaccurate information based on static training datasets. Retrieval augmented generation (RAG) mitigates this by integrating outside data sources. While previous RAG systems used pre-assembled, fixed databases with limited flexibility, we have developed Radiology RAG (RadioRAG), an end-to-end framework that retrieves data from authoritative r… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.15621v2-abstract-full').style.display = 'inline'; document.getElementById('2407.15621v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.15621v2-abstract-full" style="display: none;"> Large language models (LLMs) often generate outdated or inaccurate information based on static training datasets. Retrieval augmented generation (RAG) mitigates this by integrating outside data sources. While previous RAG systems used pre-assembled, fixed databases with limited flexibility, we have developed Radiology RAG (RadioRAG), an end-to-end framework that retrieves data from authoritative radiologic online sources in real-time. We evaluate the diagnostic accuracy of various LLMs when answering radiology-specific questions with and without access to additional online information via RAG. 
Using 80 questions from the RSNA Case Collection across radiologic subspecialties and 24 additional expert-curated questions with reference standard answers, LLMs (GPT-3.5-turbo, GPT-4, Mistral-7B, Mixtral-8x7B, and Llama3 [8B and 70B]) were prompted with and without RadioRAG in a zero-shot inference scenario. RadioRAG retrieved context-specific information from www.radiopaedia.org in real-time. Accuracy was investigated. Statistical analyses were performed using bootstrapping. The results were further compared with human performance. RadioRAG improved diagnostic accuracy across most LLMs, with relative accuracy increases ranging up to 54% for different LLMs. It matched or exceeded non-RAG models and the human radiologist in question answering across radiologic subspecialties, particularly in breast imaging and emergency radiology. However, the degree of improvement varied among models; GPT-3.5-turbo and Mixtral-8x7B-instruct-v0.1 saw notable gains, while Mistral-7B-instruct-v0.2 showed no improvement, highlighting variability in RadioRAG's effectiveness. LLMs benefit when provided access to domain-specific data beyond their training data. For radiology, RadioRAG establishes a robust framework that substantially improves diagnostic accuracy and factuality in radiological question answering. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.15621v2-abstract-full').style.display = 'none'; document.getElementById('2407.15621v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.13463">arXiv:2407.13463</a> <span> [<a href="https://arxiv.org/pdf/2407.13463">pdf</a>, <a href="https://arxiv.org/format/2407.13463">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> End-To-End Clinical Trial Matching with Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ferber%2C+D">Dyke Ferber</a>, <a href="/search/cs?searchtype=author&query=Hilgers%2C+L">Lars Hilgers</a>, <a href="/search/cs?searchtype=author&query=Wiest%2C+I+C">Isabella C. Wiest</a>, <a href="/search/cs?searchtype=author&query=Le%C3%9Fmann%2C+M">Marie-Elisabeth Leßmann</a>, <a href="/search/cs?searchtype=author&query=Clusmann%2C+J">Jan Clusmann</a>, <a href="/search/cs?searchtype=author&query=Neidlinger%2C+P">Peter Neidlinger</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+J">Jiefu Zhu</a>, <a href="/search/cs?searchtype=author&query=W%C3%B6lflein%2C+G">Georg Wölflein</a>, <a href="/search/cs?searchtype=author&query=Lammert%2C+J">Jacqueline Lammert</a>, <a href="/search/cs?searchtype=author&query=Tschochohei%2C+M">Maximilian Tschochohei</a>, <a href="/search/cs?searchtype=author&query=B%C3%B6hme%2C+H">Heiko Böhme</a>, <a href="/search/cs?searchtype=author&query=J%C3%A4ger%2C+D">Dirk Jäger</a>, <a href="/search/cs?searchtype=author&query=Aldea%2C+M">Mihaela Aldea</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=H%C3%B6per%2C+C">Christiane Höper</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas 
Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.13463v1-abstract-short" style="display: inline;"> Matching cancer patients to clinical trials is essential for advancing treatment and patient care. However, the inconsistent format of medical free text documents and complex trial eligibility criteria make this process extremely challenging and time-consuming for physicians. We investigated whether the entire trial matching process - from identifying relevant trials among 105,600 oncology-related… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.13463v1-abstract-full').style.display = 'inline'; document.getElementById('2407.13463v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.13463v1-abstract-full" style="display: none;"> Matching cancer patients to clinical trials is essential for advancing treatment and patient care. However, the inconsistent format of medical free text documents and complex trial eligibility criteria make this process extremely challenging and time-consuming for physicians. We investigated whether the entire trial matching process - from identifying relevant trials among 105,600 oncology-related clinical trials on clinicaltrials.gov to generating criterion-level eligibility matches - could be automated using Large Language Models (LLMs). Using GPT-4o and a set of 51 synthetic Electronic Health Records (EHRs), we demonstrate that our approach identifies relevant candidate trials in 93.3% of cases and achieves a preliminary accuracy of 88.0% when matching patient-level information at the criterion level against a baseline defined by human experts. 
Utilizing LLM feedback reveals that 39.3% criteria that were initially considered incorrect are either ambiguous or inaccurately annotated, leading to a total model accuracy of 92.7% after refining our human baseline. In summary, we present an end-to-end pipeline for clinical trial matching using LLMs, demonstrating high precision in screening and matching trials to individual patients, even outperforming the performance of qualified medical doctors. Our fully end-to-end pipeline can operate autonomously or with human supervision and is not restricted to oncology, offering a scalable solution for enhancing patient-trial matching in real-world settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.13463v1-abstract-full').style.display = 'none'; document.getElementById('2407.13463v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">149 pages, including Supplements. 
3 Main Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.16983">arXiv:2406.16983</a> <span> [<a href="https://arxiv.org/pdf/2406.16983">pdf</a>, <a href="https://arxiv.org/format/2406.16983">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> On Instabilities of Unsupervised Denoising Diffusion Models in Magnetic Resonance Imaging Reconstruction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.16983v1-abstract-short" style="display: inline;"> Denoising diffusion models offer a promising approach to accelerating magnetic resonance imaging (MRI) and producing diagnostic-level images in an unsupervised manner. However, our study demonstrates that even tiny worst-case potential perturbations transferred from a surrogate model can cause these models to generate fake tissue structures that may mislead clinicians. 
The transferability of such… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.16983v1-abstract-full').style.display = 'inline'; document.getElementById('2406.16983v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.16983v1-abstract-full" style="display: none;"> Denoising diffusion models offer a promising approach to accelerating magnetic resonance imaging (MRI) and producing diagnostic-level images in an unsupervised manner. However, our study demonstrates that even tiny worst-case potential perturbations transferred from a surrogate model can cause these models to generate fake tissue structures that may mislead clinicians. The transferability of such worst-case perturbations indicates that the robustness of image reconstruction may be compromised due to MR system imperfections or other sources of noise. Moreover, at larger perturbation strengths, diffusion models exhibit Gaussian noise-like artifacts that are distinct from those observed in supervised models and are more challenging to detect. Our results highlight the vulnerability of current state-of-the-art diffusion-based reconstruction models to possible worst-case perturbations and underscore the need for further research to improve their robustness and reliability in clinical settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.16983v1-abstract-full').style.display = 'none'; document.getElementById('2406.16983v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.01314">arXiv:2406.01314</a> <span> [<a href="https://arxiv.org/pdf/2406.01314">pdf</a>, <a href="https://arxiv.org/format/2406.01314">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Compute-Efficient Medical Image Classification with Softmax-Free Transformers and Sequence Normalization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. El Nahhas</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=M%C3%BCller-Franzes%2C+G">Gustav Müller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.01314v1-abstract-short" style="display: inline;"> The Transformer model has been pivotal in advancing fields such as natural language processing, speech recognition, and computer vision. However, a critical limitation of this model is its quadratic computational and memory complexity relative to the sequence length, which constrains its application to longer sequences. 
This is especially crucial in medical imaging where high-resolution images can… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.01314v1-abstract-full').style.display = 'inline'; document.getElementById('2406.01314v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.01314v1-abstract-full" style="display: none;"> The Transformer model has been pivotal in advancing fields such as natural language processing, speech recognition, and computer vision. However, a critical limitation of this model is its quadratic computational and memory complexity relative to the sequence length, which constrains its application to longer sequences. This is especially crucial in medical imaging where high-resolution images can reach gigapixel scale. Efforts to address this issue have predominantely focused on complex techniques, such as decomposing the softmax operation integral to the Transformer's architecture. This paper addresses this quadratic computational complexity of Transformer models and introduces a remarkably simple and effective method that circumvents this issue by eliminating the softmax function from the attention mechanism and adopting a sequence normalization technique for the key, query, and value tokens. Coupled with a reordering of matrix multiplications this approach reduces the memory- and compute complexity to a linear scale. We evaluate this approach across various medical imaging datasets comprising fundoscopic, dermascopic, radiologic and histologic imaging data. Our findings highlight that these models exhibit a comparable performance to traditional transformer models, while efficiently handling longer sequences. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.01314v1-abstract-full').style.display = 'none'; document.getElementById('2406.01314v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.04667">arXiv:2404.04667</a> <span> [<a href="https://arxiv.org/pdf/2404.04667">pdf</a>, <a href="https://arxiv.org/format/2404.04667">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Tissues and Organs">q-bio.TO</span> </div> </div> <p class="title is-5 mathjax"> Autonomous Artificial Intelligence Agents for Clinical Decision Making in Oncology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ferber%2C+D">Dyke Ferber</a>, <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. El Nahhas</a>, <a href="/search/cs?searchtype=author&query=W%C3%B6lflein%2C+G">Georg W枚lflein</a>, <a href="/search/cs?searchtype=author&query=Wiest%2C+I+C">Isabella C. 
Wiest</a>, <a href="/search/cs?searchtype=author&query=Clusmann%2C+J">Jan Clusmann</a>, <a href="/search/cs?searchtype=author&query=Le%C3%9Fman%2C+M">Marie-Elisabeth Leßman</a>, <a href="/search/cs?searchtype=author&query=Foersch%2C+S">Sebastian Foersch</a>, <a href="/search/cs?searchtype=author&query=Lammert%2C+J">Jacqueline Lammert</a>, <a href="/search/cs?searchtype=author&query=Tschochohei%2C+M">Maximilian Tschochohei</a>, <a href="/search/cs?searchtype=author&query=J%C3%A4ger%2C+D">Dirk Jäger</a>, <a href="/search/cs?searchtype=author&query=Salto-Tellez%2C+M">Manuel Salto-Tellez</a>, <a href="/search/cs?searchtype=author&query=Schultz%2C+N">Nikolaus Schultz</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.04667v1-abstract-short" style="display: inline;"> Multimodal artificial intelligence (AI) systems have the potential to enhance clinical decision-making by interpreting various types of medical data. However, the effectiveness of these models across all medical fields is uncertain. Each discipline presents unique challenges that need to be addressed for optimal performance. This complexity is further increased when attempting to integrate differe… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.04667v1-abstract-full').style.display = 'inline'; document.getElementById('2404.04667v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.04667v1-abstract-full" style="display: none;"> Multimodal artificial intelligence (AI) systems have the potential to enhance clinical decision-making by interpreting various types of medical data. 
However, the effectiveness of these models across all medical fields is uncertain. Each discipline presents unique challenges that need to be addressed for optimal performance. This complexity is further increased when attempting to integrate different fields into a single model. Here, we introduce an alternative approach to multimodal medical AI that utilizes the generalist capabilities of a large language model (LLM) as a central reasoning engine. This engine autonomously coordinates and deploys a set of specialized medical AI tools. These tools include text, radiology and histopathology image interpretation, genomic data processing, web searches, and document retrieval from medical guidelines. We validate our system across a series of clinical oncology scenarios that closely resemble typical patient care workflows. We show that the system has a high capability in employing appropriate tools (97%), drawing correct conclusions (93.6%), and providing complete (94%), and helpful (89.2%) recommendations for individual patient cases while consistently referencing relevant literature (82.5%) upon instruction. This work provides evidence that LLMs can effectively plan and execute domain-specific models to retrieve or synthesize new information when used as autonomous agents. This enables them to function as specialist, patient-tailored clinical assistants. It also simplifies regulatory compliance by allowing each component tool to be individually validated and approved. We believe, that our work can serve as a proof-of-concept for more advanced LLM-agents in the medical domain. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.04667v1-abstract-full').style.display = 'none'; document.getElementById('2404.04667v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">91 pages, 2 Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.07407">arXiv:2403.07407</a> <span> [<a href="https://arxiv.org/pdf/2403.07407">pdf</a>, <a href="https://arxiv.org/format/2403.07407">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> In-context learning enables multimodal large language models to classify cancer pathology images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ferber%2C+D">Dyke Ferber</a>, <a href="/search/cs?searchtype=author&query=W%C3%B6lflein%2C+G">Georg W枚lflein</a>, <a href="/search/cs?searchtype=author&query=Wiest%2C+I+C">Isabella C. Wiest</a>, <a href="/search/cs?searchtype=author&query=Ligero%2C+M">Marta Ligero</a>, <a href="/search/cs?searchtype=author&query=Sainath%2C+S">Srividhya Sainath</a>, <a href="/search/cs?searchtype=author&query=Laleh%2C+N+G">Narmin Ghaffari Laleh</a>, <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. 
El Nahhas</a>, <a href="/search/cs?searchtype=author&query=M%C3%BCller-Franzes%2C+G">Gustav Müller-Franzes</a>, <a href="/search/cs?searchtype=author&query=J%C3%A4ger%2C+D">Dirk Jäger</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.07407v1-abstract-short" style="display: inline;"> Medical image classification requires labeled, task-specific datasets which are used to train deep learning networks de novo, or to fine-tune foundation models. However, this process is computationally and technically demanding. In language processing, in-context learning provides an alternative, where models learn from within prompts, bypassing the need for parameter updates. Yet, in-context lear… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07407v1-abstract-full').style.display = 'inline'; document.getElementById('2403.07407v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.07407v1-abstract-full" style="display: none;"> Medical image classification requires labeled, task-specific datasets which are used to train deep learning networks de novo, or to fine-tune foundation models. However, this process is computationally and technically demanding. In language processing, in-context learning provides an alternative, where models learn from within prompts, bypassing the need for parameter updates. Yet, in-context learning remains underexplored in medical image analysis. 
Here, we systematically evaluate the model Generative Pretrained Transformer 4 with Vision capabilities (GPT-4V) on cancer image processing with in-context learning on three cancer histopathology tasks of high importance: Classification of tissue subtypes in colorectal cancer, colon polyp subtyping and breast tumor detection in lymph node sections. Our results show that in-context learning is sufficient to match or even outperform specialized neural networks trained for particular tasks, while only requiring a minimal number of samples. In summary, this study demonstrates that large vision language models trained on non-domain specific data can be applied out-of-the box to solve medical image-processing tasks in histopathology. This democratizes access of generalist AI models to medical experts without technical background especially for areas where annotated data is scarce. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07407v1-abstract-full').style.display = 'none'; document.getElementById('2403.07407v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">40 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.04558">arXiv:2403.04558</a> <span> [<a href="https://arxiv.org/pdf/2403.04558">pdf</a>, <a href="https://arxiv.org/format/2403.04558">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Reducing self-supervised learning complexity improves weakly-supervised classification performance in computational pathology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lenz%2C+T">Tim Lenz</a>, <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. El Nahhas</a>, <a href="/search/cs?searchtype=author&query=Ligero%2C+M">Marta Ligero</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.04558v2-abstract-short" style="display: inline;"> Deep Learning models have been successfully utilized to extract clinically actionable insights from routinely available histology data. Generally, these models require annotations performed by clinicians, which are scarce and costly to generate. 
The emergence of self-supervised learning (SSL) methods remove this barrier, allowing for large-scale analyses on non-annotated data. However, recent SSL… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.04558v2-abstract-full').style.display = 'inline'; document.getElementById('2403.04558v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.04558v2-abstract-full" style="display: none;"> Deep Learning models have been successfully utilized to extract clinically actionable insights from routinely available histology data. Generally, these models require annotations performed by clinicians, which are scarce and costly to generate. The emergence of self-supervised learning (SSL) methods remove this barrier, allowing for large-scale analyses on non-annotated data. However, recent SSL approaches apply increasingly expansive model architectures and larger datasets, causing the rapid escalation of data volumes, hardware prerequisites, and overall expenses, limiting access to these resources to few institutions. Therefore, we investigated the complexity of contrastive SSL in computational pathology in relation to classification performance with the utilization of consumer-grade hardware. Specifically, we analyzed the effects of adaptations in data volume, architecture, and algorithms on downstream classification tasks, emphasizing their impact on computational resources. We trained breast cancer foundation models on a large public patient cohort and validated them on various downstream classification tasks in a weakly supervised manner on two external public patient cohorts. Our experiments demonstrate that we can improve downstream classification performance whilst reducing SSL training duration by 90%. In summary, we propose a set of adaptations which enable the utilization of SSL in computational pathology in non-resource abundant environments. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.04558v2-abstract-full').style.display = 'none'; document.getElementById('2403.04558v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to MICCAI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.03891">arXiv:2403.03891</a> <span> [<a href="https://arxiv.org/pdf/2403.03891">pdf</a>, <a href="https://arxiv.org/format/2403.03891">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Joint multi-task learning improves weakly-supervised biomarker prediction in computational pathology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. 
El Nahhas</a>, <a href="/search/cs?searchtype=author&query=W%C3%B6lflein%2C+G">Georg W枚lflein</a>, <a href="/search/cs?searchtype=author&query=Ligero%2C+M">Marta Ligero</a>, <a href="/search/cs?searchtype=author&query=Lenz%2C+T">Tim Lenz</a>, <a href="/search/cs?searchtype=author&query=van+Treeck%2C+M">Marko van Treeck</a>, <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.03891v1-abstract-short" style="display: inline;"> Deep Learning (DL) can predict biomarkers directly from digitized cancer histology in a weakly-supervised setting. Recently, the prediction of continuous biomarkers through regression-based DL has seen an increasing interest. Nonetheless, clinical decision making often requires a categorical outcome. Consequently, we developed a weakly-supervised joint multi-task Transformer architecture which has… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.03891v1-abstract-full').style.display = 'inline'; document.getElementById('2403.03891v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.03891v1-abstract-full" style="display: none;"> Deep Learning (DL) can predict biomarkers directly from digitized cancer histology in a weakly-supervised setting. Recently, the prediction of continuous biomarkers through regression-based DL has seen an increasing interest. Nonetheless, clinical decision making often requires a categorical outcome. 
Consequently, we developed a weakly-supervised joint multi-task Transformer architecture which has been trained and evaluated on four public patient cohorts for the prediction of two key predictive biomarkers, microsatellite instability (MSI) and homologous recombination deficiency (HRD), trained with auxiliary regression tasks related to the tumor microenvironment. Moreover, we perform a comprehensive benchmark of 16 approaches of task balancing for weakly-supervised joint multi-task learning in computational pathology. Using our novel approach, we improve over the state-of-the-art area under the receiver operating characteristic by +7.7% and +4.1%, as well as yielding better clustering of latent embeddings by +8% and +5% for the prediction of MSI and HRD in external cohorts, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.03891v1-abstract-full').style.display = 'none'; document.getElementById('2403.03891v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.01054">arXiv:2402.01054</a> <span> [<a href="https://arxiv.org/pdf/2402.01054">pdf</a>, <a href="https://arxiv.org/format/2402.01054">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Unconditional Latent Diffusion Models Memorize Patient Imaging Data: Implications for Openly Sharing Synthetic Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Dar%2C+S+U+H">Salman Ul Hassan Dar</a>, <a href="/search/cs?searchtype=author&query=Seyfarth%2C+M">Marvin Seyfarth</a>, <a href="/search/cs?searchtype=author&query=Ayx%2C+I">Isabelle Ayx</a>, <a href="/search/cs?searchtype=author&query=Papavassiliu%2C+T">Theano Papavassiliu</a>, <a href="/search/cs?searchtype=author&query=Schoenberg%2C+S+O">Stefan O. 
Schoenberg</a>, <a href="/search/cs?searchtype=author&query=Siepmann%2C+R+M">Robert Malte Siepmann</a>, <a href="/search/cs?searchtype=author&query=Laqua%2C+F+C">Fabian Christopher Laqua</a>, <a href="/search/cs?searchtype=author&query=Kahmann%2C+J">Jannik Kahmann</a>, <a href="/search/cs?searchtype=author&query=Frey%2C+N">Norbert Frey</a>, <a href="/search/cs?searchtype=author&query=Bae%C3%9Fler%2C+B">Bettina Baeßler</a>, <a href="/search/cs?searchtype=author&query=Foersch%2C+S">Sebastian Foersch</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Engelhardt%2C+S">Sandy Engelhardt</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.01054v3-abstract-short" style="display: inline;"> AI models present a wide range of applications in the field of medicine. However, achieving optimal performance requires access to extensive healthcare data, which is often not readily available. Furthermore, the imperative to preserve patient privacy restricts patient data sharing with third parties and even within institutes. Recently, generative AI models have been gaining traction for facilita… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.01054v3-abstract-full').style.display = 'inline'; document.getElementById('2402.01054v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.01054v3-abstract-full" style="display: none;"> AI models present a wide range of applications in the field of medicine. However, achieving optimal performance requires access to extensive healthcare data, which is often not readily available. 
Furthermore, the imperative to preserve patient privacy restricts patient data sharing with third parties and even within institutes. Recently, generative AI models have been gaining traction for facilitating open-data sharing by proposing synthetic data as surrogates of real patient data. Despite the promise, some of these models are susceptible to patient data memorization, where models generate patient data copies instead of novel synthetic samples. Considering the importance of the problem, surprisingly it has received relatively little attention in the medical imaging community. To this end, we assess memorization in unconditional latent diffusion models. We train latent diffusion models on CT, MR, and X-ray datasets for synthetic data generation. We then detect the amount of training data memorized utilizing our novel self-supervised copy detection approach and further investigate various factors that can influence memorization. Our findings show a surprisingly high degree of patient data memorization across all datasets. Comparison with non-diffusion generative models, such as autoencoders and generative adversarial networks, indicates that while latent diffusion models are more susceptible to memorization, overall they outperform non-diffusion models in synthesis quality. Further analyses reveal that using augmentation strategies, small architecture, and increasing dataset can reduce memorization while over-training the models can enhance it. Collectively, our results emphasize the importance of carefully training generative models on private medical imaging datasets, and examining the synthetic data to ensure patient privacy before sharing it for medical research and applications. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.01054v3-abstract-full').style.display = 'none'; document.getElementById('2402.01054v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.14490">arXiv:2401.14490</a> <span> [<a href="https://arxiv.org/pdf/2401.14490">pdf</a>, <a href="https://arxiv.org/format/2401.14490">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> LongHealth: A Question Answering Benchmark with Long Clinical Documents </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Adams%2C+L">Lisa Adams</a>, <a href="/search/cs?searchtype=author&query=Busch%2C+F">Felix Busch</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Excoffier%2C+J">Jean-Baptiste Excoffier</a>, <a href="/search/cs?searchtype=author&query=Ortala%2C+M">Matthieu Ortala</a>, <a href="/search/cs?searchtype=author&query=L%C3%B6ser%2C+A">Alexander Löser</a>, <a href="/search/cs?searchtype=author&query=Aerts%2C+H+J">Hugo JWL. 
Aerts</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Bressem%2C+K">Keno Bressem</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.14490v1-abstract-short" style="display: inline;"> Background: Recent advancements in large language models (LLMs) offer potential benefits in healthcare, particularly in processing extensive patient records. However, existing benchmarks do not fully assess LLMs' capability in handling real-world, lengthy clinical data. Methods: We present the LongHealth benchmark, comprising 20 detailed fictional patient cases across various diseases, with each… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.14490v1-abstract-full').style.display = 'inline'; document.getElementById('2401.14490v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.14490v1-abstract-full" style="display: none;"> Background: Recent advancements in large language models (LLMs) offer potential benefits in healthcare, particularly in processing extensive patient records. However, existing benchmarks do not fully assess LLMs' capability in handling real-world, lengthy clinical data. Methods: We present the LongHealth benchmark, comprising 20 detailed fictional patient cases across various diseases, with each case containing 5,090 to 6,754 words. The benchmark challenges LLMs with 400 multiple-choice questions in three categories: information extraction, negation, and sorting, challenging LLMs to extract and interpret information from large clinical documents. 
Results: We evaluated nine open-source LLMs with a minimum of 16,000 tokens and also included OpenAI's proprietary and cost-efficient GPT-3.5 Turbo for comparison. The highest accuracy was observed for Mixtral-8x7B-Instruct-v0.1, particularly in tasks focused on information retrieval from single and multiple patient documents. However, all models struggled significantly in tasks requiring the identification of missing information, highlighting a critical area for improvement in clinical data interpretation. Conclusion: While LLMs show considerable potential for processing long clinical documents, their current accuracy levels are insufficient for reliable clinical use, especially in scenarios requiring the identification of missing information. The LongHealth benchmark provides a more realistic assessment of LLMs in a healthcare setting and highlights the need for further model refinement for safe and effective clinical application. We make the benchmark and evaluation code publicly available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.14490v1-abstract-full').style.display = 'none'; document.getElementById('2401.14490v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 3 figures, 5 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.10944">arXiv:2312.10944</a> <span> [<a href="https://arxiv.org/pdf/2312.10944">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> From Whole-slide Image to Biomarker Prediction: A Protocol for End-to-End Deep Learning in Computational Pathology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. El Nahhas</a>, <a href="/search/cs?searchtype=author&query=van+Treeck%2C+M">Marko van Treeck</a>, <a href="/search/cs?searchtype=author&query=W%C3%B6lflein%2C+G">Georg W枚lflein</a>, <a href="/search/cs?searchtype=author&query=Unger%2C+M">Michaela Unger</a>, <a href="/search/cs?searchtype=author&query=Ligero%2C+M">Marta Ligero</a>, <a href="/search/cs?searchtype=author&query=Lenz%2C+T">Tim Lenz</a>, <a href="/search/cs?searchtype=author&query=Wagner%2C+S+J">Sophia J. Wagner</a>, <a href="/search/cs?searchtype=author&query=Hewitt%2C+K+J">Katherine J. 
Hewitt</a>, <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Foersch%2C+S">Sebastian Foersch</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.10944v1-abstract-short" style="display: inline;"> Hematoxylin- and eosin (H&E) stained whole-slide images (WSIs) are the foundation of diagnosis of cancer. In recent years, development of deep learning-based methods in computational pathology enabled the prediction of biomarkers directly from WSIs. However, accurately linking tissue phenotype to biomarkers at scale remains a crucial challenge for democratizing complex biomarkers in precision onco… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.10944v1-abstract-full').style.display = 'inline'; document.getElementById('2312.10944v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.10944v1-abstract-full" style="display: none;"> Hematoxylin- and eosin (H&E) stained whole-slide images (WSIs) are the foundation of diagnosis of cancer. In recent years, development of deep learning-based methods in computational pathology enabled the prediction of biomarkers directly from WSIs. However, accurately linking tissue phenotype to biomarkers at scale remains a crucial challenge for democratizing complex biomarkers in precision oncology. This protocol describes a practical workflow for solid tumor associative modeling in pathology (STAMP), enabling prediction of biomarkers directly from WSIs using deep learning. 
The STAMP workflow is biomarker agnostic and allows for genetic- and clinicopathologic tabular data to be included as an additional input, together with histopathology images. The protocol consists of five main stages which have been successfully applied to various research problems: formal problem definition, data preprocessing, modeling, evaluation and clinical translation. The STAMP workflow differentiates itself through its focus on serving as a collaborative framework that can be used by clinicians and engineers alike for setting up research projects in the field of computational pathology. As an example task, we applied STAMP to the prediction of microsatellite instability (MSI) status in colorectal cancer, showing accurate performance for the identification of MSI-high tumors. Moreover, we provide an open-source codebase which has been deployed at several hospitals across the globe to set up computational pathology workflows. The STAMP workflow requires one workday of hands-on computational execution and basic command line knowledge. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.10944v1-abstract-full').style.display = 'none'; document.getElementById('2312.10944v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.11772">arXiv:2311.11772</a> <span> [<a href="https://arxiv.org/pdf/2311.11772">pdf</a>, <a href="https://arxiv.org/format/2311.11772">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Benchmarking Pathology Feature Extractors for Whole Slide Image Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=W%C3%B6lflein%2C+G">Georg Wölflein</a>, <a href="/search/cs?searchtype=author&query=Ferber%2C+D">Dyke Ferber</a>, <a href="/search/cs?searchtype=author&query=Meneghetti%2C+A+R">Asier R. Meneghetti</a>, <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. El Nahhas</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Carrero%2C+Z+I">Zunamys I. Carrero</a>, <a href="/search/cs?searchtype=author&query=Harrison%2C+D+J">David J. Harrison</a>, <a href="/search/cs?searchtype=author&query=Arandjelovi%C4%87%2C+O">Ognjen Arandjelović</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.11772v5-abstract-short" style="display: inline;"> Weakly supervised whole slide image classification is a key task in computational pathology, which involves predicting a slide-level label from a set of image patches constituting the slide. 
Constructing models to solve this task involves multiple design choices, often made without robust empirical or conclusive theoretical justification. To address this, we conduct a comprehensive benchmarking of… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.11772v5-abstract-full').style.display = 'inline'; document.getElementById('2311.11772v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.11772v5-abstract-full" style="display: none;"> Weakly supervised whole slide image classification is a key task in computational pathology, which involves predicting a slide-level label from a set of image patches constituting the slide. Constructing models to solve this task involves multiple design choices, often made without robust empirical or conclusive theoretical justification. To address this, we conduct a comprehensive benchmarking of feature extractors to answer three critical questions: 1) Is stain normalisation still a necessary preprocessing step? 2) Which feature extractors are best for downstream slide-level classification? 3) How does magnification affect downstream performance? Our study constitutes the most comprehensive evaluation of publicly available pathology feature extractors to date, involving more than 10,000 training runs across 14 feature extractors, 9 tasks, 5 datasets, 3 downstream architectures, 2 levels of magnification, and various preprocessing setups. Our findings challenge existing assumptions: 1) We observe empirically, and by analysing the latent space, that skipping stain normalisation and image augmentations does not degrade performance, while significantly reducing memory and computational demands. 2) We develop a novel evaluation metric to compare relative downstream performance, and show that the choice of feature extractor is the most consequential factor for downstream performance. 
3) We find that lower-magnification slides are sufficient for accurate slide-level classification. Contrary to previous patch-level benchmarking studies, our approach emphasises clinical relevance by focusing on slide-level biomarker prediction tasks in a weakly supervised setting with external validation cohorts. Our findings stand to streamline digital pathology workflows by minimising preprocessing needs and informing the selection of feature extractors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.11772v5-abstract-full').style.display = 'none'; document.getElementById('2311.11772v5-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">For the conference version see: arXiv:2311.11772v4. 
For the longer journal version with additional experiments see arXiv:2311.11772v5</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.17007">arXiv:2309.17007</a> <span> [<a href="https://arxiv.org/pdf/2309.17007">pdf</a>, <a href="https://arxiv.org/format/2309.17007">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Medical Foundation Models are Susceptible to Targeted Misinformation Attacks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Tianci Wang</a>, <a href="/search/cs?searchtype=author&query=Mueller-Franzes%2C+G">Gustav Mueller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=F%C3%B6rsch%2C+S">Sebastian Försch</a>, <a href="/search/cs?searchtype=author&query=Kleesiek%2C+J">Jens Kleesiek</a>, <a href="/search/cs?searchtype=author&query=Haarburger%2C+C">Christoph Haarburger</a>, <a href="/search/cs?searchtype=author&query=Bressem%2C+K+K">Keno K. 
Bressem</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.17007v1-abstract-short" style="display: inline;"> Large language models (LLMs) have broad medical knowledge and can reason about medical information across many domains, holding promising potential for diverse medical applications in the near future. In this study, we demonstrate a concerning vulnerability of LLMs in medicine. Through targeted manipulation of just 1.1% of the model's weights, we can deliberately inject an incorrect biomedical fac… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.17007v1-abstract-full').style.display = 'inline'; document.getElementById('2309.17007v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.17007v1-abstract-full" style="display: none;"> Large language models (LLMs) have broad medical knowledge and can reason about medical information across many domains, holding promising potential for diverse medical applications in the near future. In this study, we demonstrate a concerning vulnerability of LLMs in medicine. Through targeted manipulation of just 1.1% of the model's weights, we can deliberately inject an incorrect biomedical fact. The erroneous information is then propagated in the model's output, whilst its performance on other biomedical tasks remains intact. We validate our findings in a set of 1,038 incorrect biomedical facts. This peculiar susceptibility raises serious security and trustworthiness concerns for the application of LLMs in healthcare settings. 
It accentuates the need for robust protective measures, thorough verification mechanisms, and stringent management of access to these models, ensuring their reliable and safe use in medical practice. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.17007v1-abstract-full').style.display = 'none'; document.getElementById('2309.17007v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.14120">arXiv:2308.14120</a> <span> [<a href="https://arxiv.org/pdf/2308.14120">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41467-024-45879-8">10.1038/s41467-024-45879-8 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Large Language Models Streamline Automated Machine Learning for Clinical Studies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a 
href="/search/cs?searchtype=author&query=Lotfinia%2C+M">Mahshad Lotfinia</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.14120v5-abstract-short" style="display: inline;"> A knowledge gap persists between machine learning (ML) developers (e.g., data scientists) and practitioners (e.g., clinicians), hampering the full utilization of ML for clinical data analysis. We investigated the potential of the ChatGPT Advanced Data Analysis (ADA), an extension of GPT-4, to bridge this gap and perform ML analyses efficiently. Real-world clinical datasets and study details from l… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.14120v5-abstract-full').style.display = 'inline'; document.getElementById('2308.14120v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.14120v5-abstract-full" style="display: none;"> A knowledge gap persists between machine learning (ML) developers (e.g., data scientists) and practitioners (e.g., clinicians), hampering the full utilization of ML for clinical data analysis. We investigated the potential of the ChatGPT Advanced Data Analysis (ADA), an extension of GPT-4, to bridge this gap and perform ML analyses efficiently. Real-world clinical datasets and study details from large trials across various medical specialties were presented to ChatGPT ADA without specific guidance. 
ChatGPT ADA autonomously developed state-of-the-art ML models based on the original study's training data to predict clinical outcomes such as cancer development, cancer progression, disease complications, or biomarkers such as pathogenic gene sequences. Following the re-implementation and optimization of the published models, the head-to-head comparison of the ChatGPT ADA-crafted ML models and their respective manually crafted counterparts revealed no significant differences in traditional performance metrics (P>0.071). Strikingly, the ChatGPT ADA-crafted ML models often outperformed their counterparts. In conclusion, ChatGPT ADA offers a promising avenue to democratize ML in medicine by simplifying complex data analyses, yet should enhance, not replace, specialized training and resources, to promote broader applications in medical research and practice. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.14120v5-abstract-full').style.display = 'none'; document.getElementById('2308.14120v5-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in Nature Communications</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nat Commun 15, 1603 (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.07688">arXiv:2308.07688</a> <span> [<a href="https://arxiv.org/pdf/2308.07688">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1186/s41747-023-00411-3">10.1186/s41747-023-00411-3 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Enhancing Network Initialization for Medical AI Models Using Large-Scale, Unlabeled Natural Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Misera%2C+L">Leo Misera</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.07688v5-abstract-short" style="display: inline;"> Pre-training datasets, like ImageNet, have become the gold standard in medical image analysis. However, the emergence of self-supervised learning (SSL), which leverages unlabeled data to learn robust features, presents an opportunity to bypass the intensive labeling process. In this study, we explored if SSL for pre-training on non-medical images can be applied to chest radiographs and how it comp… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.07688v5-abstract-full').style.display = 'inline'; document.getElementById('2308.07688v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.07688v5-abstract-full" style="display: none;"> Pre-training datasets, like ImageNet, have become the gold standard in medical image analysis. However, the emergence of self-supervised learning (SSL), which leverages unlabeled data to learn robust features, presents an opportunity to bypass the intensive labeling process. In this study, we explored if SSL for pre-training on non-medical images can be applied to chest radiographs and how it compares to supervised pre-training on non-medical images and on medical images. We utilized a vision transformer and initialized its weights based on (i) SSL pre-training on natural images (DINOv2), (ii) SL pre-training on natural images (ImageNet dataset), and (iii) SL pre-training on chest radiographs from the MIMIC-CXR database. We tested our approach on over 800,000 chest radiographs from six large global datasets, diagnosing more than 20 different imaging findings. Our SSL pre-training on curated images not only outperformed ImageNet-based pre-training (P<0.001 for all datasets) but, in certain cases, also exceeded SL on the MIMIC-CXR dataset. 
Our findings suggest that selecting the right pre-training strategy, especially with SSL, can be pivotal for improving artificial intelligence (AI)'s diagnostic accuracy in medical imaging. By demonstrating the promise of SSL in chest radiograph analysis, we underline a transformative shift towards more efficient and accurate AI models in medical imaging. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.07688v5-abstract-full').style.display = 'none'; document.getElementById('2308.07688v5-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in European Radiology Experimental</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Eur Radiol Exp 8, 10 (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.02800">arXiv:2306.02800</a> <span> [<a href="https://arxiv.org/pdf/2306.02800">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark 
is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.jaad.2023.11.065">10.1016/j.jaad.2023.11.065 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Using Multiple Dermoscopic Photographs of One Lesion Improves Melanoma Classification via Deep Learning: A Prognostic Diagnostic Accuracy Study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hekler%2C+A">Achim Hekler</a>, <a href="/search/cs?searchtype=author&query=Maron%2C+R+C">Roman C. Maron</a>, <a href="/search/cs?searchtype=author&query=Haggenm%C3%BCller%2C+S">Sarah Haggenmüller</a>, <a href="/search/cs?searchtype=author&query=Schmitt%2C+M">Max Schmitt</a>, <a href="/search/cs?searchtype=author&query=Wies%2C+C">Christoph Wies</a>, <a href="/search/cs?searchtype=author&query=Utikal%2C+J+S">Jochen S. Utikal</a>, <a href="/search/cs?searchtype=author&query=Meier%2C+F">Friedegund Meier</a>, <a href="/search/cs?searchtype=author&query=Hobelsberger%2C+S">Sarah Hobelsberger</a>, <a href="/search/cs?searchtype=author&query=Gellrich%2C+F+F">Frank F. Gellrich</a>, <a href="/search/cs?searchtype=author&query=Sergon%2C+M">Mildred Sergon</a>, <a href="/search/cs?searchtype=author&query=Hauschild%2C+A">Axel Hauschild</a>, <a href="/search/cs?searchtype=author&query=French%2C+L+E">Lars E. French</a>, <a href="/search/cs?searchtype=author&query=Heinzerling%2C+L">Lucie Heinzerling</a>, <a href="/search/cs?searchtype=author&query=Schlager%2C+J+G">Justin G. Schlager</a>, <a href="/search/cs?searchtype=author&query=Ghoreschi%2C+K">Kamran Ghoreschi</a>, <a href="/search/cs?searchtype=author&query=Schlaak%2C+M">Max Schlaak</a>, <a href="/search/cs?searchtype=author&query=Hilke%2C+F+J">Franz J. 
Hilke</a>, <a href="/search/cs?searchtype=author&query=Poch%2C+G">Gabriela Poch</a>, <a href="/search/cs?searchtype=author&query=Korsing%2C+S">Sören Korsing</a>, <a href="/search/cs?searchtype=author&query=Berking%2C+C">Carola Berking</a>, <a href="/search/cs?searchtype=author&query=Heppt%2C+M+V">Markus V. Heppt</a>, <a href="/search/cs?searchtype=author&query=Erdmann%2C+M">Michael Erdmann</a>, <a href="/search/cs?searchtype=author&query=Haferkamp%2C+S">Sebastian Haferkamp</a>, <a href="/search/cs?searchtype=author&query=Drexler%2C+K">Konstantin Drexler</a>, <a href="/search/cs?searchtype=author&query=Schadendorf%2C+D">Dirk Schadendorf</a> , et al. (6 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.02800v1-abstract-short" style="display: inline;"> Background: Convolutional neural network (CNN)-based melanoma classifiers face several challenges that limit their usefulness in clinical practice. Objective: To investigate the impact of multiple real-world dermoscopic views of a single lesion of interest on a CNN-based melanoma classifier. Methods: This study evaluated 656 suspected melanoma lesions. Classifier performance was measured using a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.02800v1-abstract-full').style.display = 'inline'; document.getElementById('2306.02800v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.02800v1-abstract-full" style="display: none;"> Background: Convolutional neural network (CNN)-based melanoma classifiers face several challenges that limit their usefulness in clinical practice. Objective: To investigate the impact of multiple real-world dermoscopic views of a single lesion of interest on a CNN-based melanoma classifier. 
Methods: This study evaluated 656 suspected melanoma lesions. Classifier performance was measured using area under the receiver operating characteristic curve (AUROC), expected calibration error (ECE) and maximum confidence change (MCC) for (I) a single-view scenario, (II) a multiview scenario using multiple artificially modified images per lesion and (III) a multiview scenario with multiple real-world images per lesion. Results: The multiview approach with real-world images significantly increased the AUROC from 0.905 (95% CI, 0.879-0.929) in the single-view approach to 0.930 (95% CI, 0.909-0.951). ECE and MCC also improved significantly from 0.131 (95% CI, 0.105-0.159) to 0.072 (95% CI: 0.052-0.093) and from 0.149 (95% CI, 0.125-0.171) to 0.115 (95% CI: 0.099-0.131), respectively. Comparing multiview real-world to artificially modified images showed comparable diagnostic accuracy and uncertainty estimation, but significantly worse robustness for the latter. Conclusion: Using multiple real-world images is an inexpensive method to positively impact the performance of a CNN-based melanoma classifier. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.02800v1-abstract-full').style.display = 'none'; document.getElementById('2306.02800v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.06963">arXiv:2305.06963</a> <span> [<a href="https://arxiv.org/pdf/2305.06963">pdf</a>, <a href="https://arxiv.org/format/2305.06963">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Cascaded Cross-Attention Networks for Data-Efficient Whole-Slide Image Classification Using Transformers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.06963v1-abstract-short" style="display: inline;"> Whole-Slide Imaging allows for the capturing and digitization of high-resolution images of histological specimen. An automated analysis of such images using deep learning models is therefore of high demand. The transformer architecture has been proposed as a possible candidate for effectively leveraging the high-resolution information. 
Here, the whole-slide image is partitioned into smaller image… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06963v1-abstract-full').style.display = 'inline'; document.getElementById('2305.06963v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.06963v1-abstract-full" style="display: none;"> Whole-Slide Imaging allows for the capturing and digitization of high-resolution images of histological specimen. An automated analysis of such images using deep learning models is therefore of high demand. The transformer architecture has been proposed as a possible candidate for effectively leveraging the high-resolution information. Here, the whole-slide image is partitioned into smaller image patches and feature tokens are extracted from these image patches. However, while the conventional transformer allows for a simultaneous processing of a large set of input tokens, the computational demand scales quadratically with the number of input tokens and thus quadratically with the number of image patches. To address this problem we propose a novel cascaded cross-attention network (CCAN) based on the cross-attention mechanism that scales linearly with the number of extracted patches. Our experiments demonstrate that this architecture is at least on-par with and even outperforms other attention-based state-of-the-art methods on two public datasets: On the use-case of lung cancer (TCGA NSCLC) our model reaches a mean area under the receiver operating characteristic (AUC) of 0.970 $\pm$ 0.008 and on renal cancer (TCGA RCC) reaches a mean AUC of 0.985 $\pm$ 0.004. Furthermore, we show that our proposed model is efficient in low-data regimes, making it a promising approach for analyzing whole-slide images in resource-limited settings. To foster research in this direction, we make our code publicly available on GitHub: XXX. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06963v1-abstract-full').style.display = 'none'; document.getElementById('2305.06963v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.08972">arXiv:2304.08972</a> <span> [<a href="https://arxiv.org/pdf/2304.08972">pdf</a>, <a href="https://arxiv.org/format/2304.08972">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41598-023-41331-x">10.1038/s41598-023-41331-x <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fibroglandular Tissue Segmentation in Breast MRI using Vision Transformers -- A multi-institutional evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=M%C3%BCller-Franzes%2C+G">Gustav Müller-Franzes</a>, <a href="/search/cs?searchtype=author&query=M%C3%BCller-Franzes%2C+F">Fritz Müller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Huck%2C+L">Luisa Huck</a>, <a href="/search/cs?searchtype=author&query=Raaff%2C+V">Vanessa Raaff</a>, <a href="/search/cs?searchtype=author&query=Kemmer%2C+E">Eva Kemmer</a>, <a 
href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Nolte%2C+T">Teresa Nolte</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.08972v1-abstract-short" style="display: inline;"> Accurate and automatic segmentation of fibroglandular tissue in breast MRI screening is essential for the quantification of breast density and background parenchymal enhancement. In this retrospective study, we developed and evaluated a transformer-based neural network for breast segmentation (TraBS) in multi-institutional MRI data, and compared its performance to the well established convolutiona… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.08972v1-abstract-full').style.display = 'inline'; document.getElementById('2304.08972v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.08972v1-abstract-full" style="display: none;"> Accurate and automatic segmentation of fibroglandular tissue in breast MRI screening is essential for the quantification of breast density and background parenchymal enhancement. In this retrospective study, we developed and evaluated a transformer-based neural network for breast segmentation (TraBS) in multi-institutional MRI data, and compared its performance to the well established convolutional neural network nnUNet. 
TraBS and nnUNet were trained and tested on 200 internal and 40 external breast MRI examinations using manual segmentations generated by experienced human readers. Segmentation performance was assessed in terms of the Dice score and the average symmetric surface distance. The Dice score for nnUNet was lower than for TraBS on the internal testset (0.909$\pm$0.069 versus 0.916$\pm$0.067, P<0.001) and on the external testset (0.824$\pm$0.144 versus 0.864$\pm$0.081, P=0.004). Moreover, the average symmetric surface distance was higher (=worse) for nnUNet than for TraBS on the internal (0.657$\pm$2.856 versus 0.548$\pm$2.195, P=0.001) and on the external testset (0.727$\pm$0.620 versus 0.584$\pm$0.413, P=0.03). Our study demonstrates that transformer-based networks improve the quality of fibroglandular tissue segmentation in breast MRI compared to convolutional-based models like nnUNet. These findings might help to enhance the accuracy of breast density and parenchymal enhancement quantification in breast MRI screening. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.08972v1-abstract-full').style.display = 'none'; document.getElementById('2304.08972v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Sci Rep 13, 14207 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.05153">arXiv:2304.05153</a> <span> [<a href="https://arxiv.org/pdf/2304.05153">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Regression-based Deep-Learning predicts molecular biomarkers from pathology slides </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Nahhas%2C+O+S+M+E">Omar S. M. El Nahhas</a>, <a href="/search/cs?searchtype=author&query=Loeffler%2C+C+M+L">Chiara M. L. Loeffler</a>, <a href="/search/cs?searchtype=author&query=Carrero%2C+Z+I">Zunamys I. Carrero</a>, <a href="/search/cs?searchtype=author&query=van+Treeck%2C+M">Marko van Treeck</a>, <a href="/search/cs?searchtype=author&query=Kolbinger%2C+F+R">Fiona R. Kolbinger</a>, <a href="/search/cs?searchtype=author&query=Hewitt%2C+K+J">Katherine J. Hewitt</a>, <a href="/search/cs?searchtype=author&query=Muti%2C+H+S">Hannah S. 
Muti</a>, <a href="/search/cs?searchtype=author&query=Graziani%2C+M">Mara Graziani</a>, <a href="/search/cs?searchtype=author&query=Zeng%2C+Q">Qinghe Zeng</a>, <a href="/search/cs?searchtype=author&query=Calderaro%2C+J">Julien Calderaro</a>, <a href="/search/cs?searchtype=author&query=Ortiz-Br%C3%BCchle%2C+N">Nadina Ortiz-Br眉chle</a>, <a href="/search/cs?searchtype=author&query=Yuan%2C+T">Tanwei Yuan</a>, <a href="/search/cs?searchtype=author&query=Hoffmeister%2C+M">Michael Hoffmeister</a>, <a href="/search/cs?searchtype=author&query=Brenner%2C+H">Hermann Brenner</a>, <a href="/search/cs?searchtype=author&query=Brobeil%2C+A">Alexander Brobeil</a>, <a href="/search/cs?searchtype=author&query=Reis-Filho%2C+J+S">Jorge S. Reis-Filho</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.05153v1-abstract-short" style="display: inline;"> Deep Learning (DL) can predict biomarkers from cancer histopathology. Several clinically approved applications use this technology. Most approaches, however, predict categorical labels, whereas biomarkers are often continuous measurements. We hypothesized that regression-based DL outperforms classification-based DL. Therefore, we developed and evaluated a new self-supervised attention-based weakly… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.05153v1-abstract-full').style.display = 'inline'; document.getElementById('2304.05153v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.05153v1-abstract-full" style="display: none;"> Deep Learning (DL) can predict biomarkers from cancer histopathology. Several clinically approved applications use this technology. 
Most approaches, however, predict categorical labels, whereas biomarkers are often continuous measurements. We hypothesized that regression-based DL outperforms classification-based DL. Therefore, we developed and evaluated a new self-supervised attention-based weakly supervised regression method that predicts continuous biomarkers directly from images in 11,671 patients across nine cancer types. We tested our method for multiple clinically and biologically relevant biomarkers: homologous repair deficiency (HRD) score, a clinically used pan-cancer biomarker, as well as markers of key biological processes in the tumor microenvironment. Using regression significantly enhances the accuracy of biomarker prediction, while also improving the interpretability of the results over classification. In a large cohort of colorectal cancer patients, regression-based prediction scores provide a higher prognostic value than classification-based scores. Our open-source regression approach offers a promising alternative for continuous biomarker analysis in computational pathology. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.05153v1-abstract-full').style.display = 'none'; document.getElementById('2304.05153v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.09617">arXiv:2301.09617</a> <span> [<a href="https://arxiv.org/pdf/2301.09617">pdf</a>, <a href="https://arxiv.org/format/2301.09617">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Fully transformer-based biomarker prediction from colorectal cancer histology: a large-scale multicentric study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wagner%2C+S+J">Sophia J. Wagner</a>, <a href="/search/cs?searchtype=author&query=Reisenb%C3%BCchler%2C+D">Daniel Reisenb眉chler</a>, <a href="/search/cs?searchtype=author&query=West%2C+N+P">Nicholas P. West</a>, <a href="/search/cs?searchtype=author&query=Niehues%2C+J+M">Jan Moritz Niehues</a>, <a href="/search/cs?searchtype=author&query=Veldhuizen%2C+G+P">Gregory Patrick Veldhuizen</a>, <a href="/search/cs?searchtype=author&query=Quirke%2C+P">Philip Quirke</a>, <a href="/search/cs?searchtype=author&query=Grabsch%2C+H+I">Heike I. Grabsch</a>, <a href="/search/cs?searchtype=author&query=Brandt%2C+P+A+v+d">Piet A. van den Brandt</a>, <a href="/search/cs?searchtype=author&query=Hutchins%2C+G+G+A">Gordon G. A. Hutchins</a>, <a href="/search/cs?searchtype=author&query=Richman%2C+S+D">Susan D. 
Richman</a>, <a href="/search/cs?searchtype=author&query=Yuan%2C+T">Tanwei Yuan</a>, <a href="/search/cs?searchtype=author&query=Langer%2C+R">Rupert Langer</a>, <a href="/search/cs?searchtype=author&query=Jenniskens%2C+J+C+A">Josien Christina Anna Jenniskens</a>, <a href="/search/cs?searchtype=author&query=Offermans%2C+K">Kelly Offermans</a>, <a href="/search/cs?searchtype=author&query=Mueller%2C+W">Wolfram Mueller</a>, <a href="/search/cs?searchtype=author&query=Gray%2C+R">Richard Gray</a>, <a href="/search/cs?searchtype=author&query=Gruber%2C+S+B">Stephen B. Gruber</a>, <a href="/search/cs?searchtype=author&query=Greenson%2C+J+K">Joel K. Greenson</a>, <a href="/search/cs?searchtype=author&query=Rennert%2C+G">Gad Rennert</a>, <a href="/search/cs?searchtype=author&query=Bonner%2C+J+D">Joseph D. Bonner</a>, <a href="/search/cs?searchtype=author&query=Schmolze%2C+D">Daniel Schmolze</a>, <a href="/search/cs?searchtype=author&query=James%2C+J+A">Jacqueline A. James</a>, <a href="/search/cs?searchtype=author&query=Loughrey%2C+M+B">Maurice B. Loughrey</a>, <a href="/search/cs?searchtype=author&query=Salto-Tellez%2C+M">Manuel Salto-Tellez</a>, <a href="/search/cs?searchtype=author&query=Brenner%2C+H">Hermann Brenner</a> , et al. (6 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.09617v2-abstract-short" style="display: inline;"> Background: Deep learning (DL) can extract predictive and prognostic biomarkers from routine pathology slides in colorectal cancer. For example, a DL test for the diagnosis of microsatellite instability (MSI) in CRC has been approved in 2022. Current approaches rely on convolutional neural networks (CNNs). 
Transformer networks are outperforming CNNs and are replacing them in many applications, but… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.09617v2-abstract-full').style.display = 'inline'; document.getElementById('2301.09617v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.09617v2-abstract-full" style="display: none;"> Background: Deep learning (DL) can extract predictive and prognostic biomarkers from routine pathology slides in colorectal cancer. For example, a DL test for the diagnosis of microsatellite instability (MSI) in CRC has been approved in 2022. Current approaches rely on convolutional neural networks (CNNs). Transformer networks are outperforming CNNs and are replacing them in many applications, but have not been used for biomarker prediction in cancer at a large scale. In addition, most DL approaches have been trained on small patient cohorts, which limits their clinical utility. Methods: In this study, we developed a new fully transformer-based pipeline for end-to-end biomarker prediction from pathology slides. We combine a pre-trained transformer encoder and a transformer network for patch aggregation, capable of yielding single and multi-target prediction at patient level. We train our pipeline on over 9,000 patients from 10 colorectal cancer cohorts. Results: A fully transformer-based approach massively improves the performance, generalizability, data efficiency, and interpretability as compared with current state-of-the-art algorithms. After training on a large multicenter cohort, we achieve a sensitivity of 0.97 with a negative predictive value of 0.99 for MSI prediction on surgical resection specimens. We demonstrate for the first time that resection specimen-only training reaches clinical-grade performance on endoscopic biopsy tissue, solving a long-standing diagnostic problem. 
Interpretation: A fully transformer-based end-to-end pipeline trained on thousands of pathology slides yields clinical-grade performance for biomarker prediction on surgical resections and biopsies. Our new methods are freely available under an open source license. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.09617v2-abstract-full').style.display = 'none'; document.getElementById('2301.09617v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Updated Figure 2 and Table A.5</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.09162">arXiv:2212.09162</a> <span> [<a href="https://arxiv.org/pdf/2212.09162">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Medical Diagnosis with Large Scale Multimodal Transformers: Leveraging Diverse Data for More Accurate Diagnosis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Mueller-Franzes%2C+G">Gustav Mueller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Tianci Wang</a>, <a 
href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Haarburger%2C+C">Christoph Haarburger</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Bressem%2C+K">Keno Bressem</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.09162v2-abstract-short" style="display: inline;"> Multimodal deep learning has been used to predict clinical endpoints and diagnoses from clinical routine data. However, these models suffer from scaling issues: they have to learn pairwise interactions between each piece of information in each data type, thereby escalating model complexity beyond manageable scales. This has so far precluded a widespread use of multimodal deep learning. Here, we pr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.09162v2-abstract-full').style.display = 'inline'; document.getElementById('2212.09162v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.09162v2-abstract-full" style="display: none;"> Multimodal deep learning has been used to predict clinical endpoints and diagnoses from clinical routine data. 
However, these models suffer from scaling issues: they have to learn pairwise interactions between each piece of information in each data type, thereby escalating model complexity beyond manageable scales. This has so far precluded a widespread use of multimodal deep learning. Here, we present a new technical approach of "learnable synergies", in which the model only selects relevant interactions between data modalities and keeps an "internal memory" of relevant data. Our approach is easily scalable and naturally adapts to multimodal data inputs from clinical routine. We demonstrate this approach on three large multimodal datasets from radiology and ophthalmology and show that it outperforms state-of-the-art models in clinically relevant diagnosis tasks. Our new approach is transferable and will allow the application of multimodal deep learning to a broad set of clinically relevant problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.09162v2-abstract-full').style.display = 'none'; document.getElementById('2212.09162v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.07501">arXiv:2212.07501</a> <span> [<a href="https://arxiv.org/pdf/2212.07501">pdf</a>, <a href="https://arxiv.org/format/2212.07501">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41598-023-39278-0">10.1038/s41598-023-39278-0 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Diffusion Probabilistic Models beat GANs on Medical Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=M%C3%BCller-Franzes%2C+G">Gustav M眉ller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Niehues%2C+J+M">Jan Moritz Niehues</a>, <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Haarburger%2C+C">Christoph Haarburger</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Tianci Wang</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract 
mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.07501v1-abstract-short" style="display: inline;"> The success of Deep Learning applications critically depends on the quality and scale of the underlying training data. Generative adversarial networks (GANs) can generate arbitrary large datasets, but diversity and fidelity are limited, which has recently been addressed by denoising diffusion probabilistic models (DDPMs) whose superiority has been demonstrated on natural images. In this study, we… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.07501v1-abstract-full').style.display = 'inline'; document.getElementById('2212.07501v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.07501v1-abstract-full" style="display: none;"> The success of Deep Learning applications critically depends on the quality and scale of the underlying training data. Generative adversarial networks (GANs) can generate arbitrary large datasets, but diversity and fidelity are limited, which has recently been addressed by denoising diffusion probabilistic models (DDPMs) whose superiority has been demonstrated on natural images. In this study, we propose Medfusion, a conditional latent DDPM for medical images. We compare our DDPM-based model against GAN-based models, which constitute the current state-of-the-art in the medical domain. Medfusion was trained and compared with (i) StyleGan-3 on n=101,442 images from the AIROGS challenge dataset to generate fundoscopies with and without glaucoma, (ii) ProGAN on n=191,027 from the CheXpert dataset to generate radiographs with and without cardiomegaly and (iii) wGAN on n=19,557 images from the CRCMS dataset to generate histopathological images with and without microsatellite stability. 
In the AIROGS, CRMCS, and CheXpert datasets, Medfusion achieved lower (=better) FID than the GANs (11.63 versus 20.43, 30.03 versus 49.26, and 17.28 versus 84.31). Also, fidelity (precision) and diversity (recall) were higher (=better) for Medfusion in all three datasets. Our study shows that DDPM are a superior alternative to GANs for image synthesis in the medical domain. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.07501v1-abstract-full').style.display = 'none'; document.getElementById('2212.07501v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Sci Rep 13, 12098 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.13606">arXiv:2211.13606</a> <span> [<a href="https://arxiv.org/pdf/2211.13606">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41598-023-33303-y">10.1038/s41598-023-33303-y <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Collaborative Training of Medical 
Artificial Intelligence Models with non-uniform Labels </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Isfort%2C+P">Peter Isfort</a>, <a href="/search/cs?searchtype=author&query=Saehn%2C+M">Marwin Saehn</a>, <a href="/search/cs?searchtype=author&query=Mueller-Franzes%2C+G">Gustav Mueller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.13606v2-abstract-short" style="display: inline;"> Due to the rapid advancements in recent years, medical image analysis is largely dominated by deep learning (DL). However, building powerful and robust DL models requires training with large multi-party datasets. While multiple stakeholders have provided publicly available datasets, the ways in which these data are labeled vary widely. For Instance, an institution might provide a dataset of chest… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.13606v2-abstract-full').style.display = 'inline'; document.getElementById('2211.13606v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.13606v2-abstract-full" style="display: none;"> Due to the rapid advancements in recent years, medical image analysis is largely dominated by deep learning (DL). 
However, building powerful and robust DL models requires training with large multi-party datasets. While multiple stakeholders have provided publicly available datasets, the ways in which these data are labeled vary widely. For Instance, an institution might provide a dataset of chest radiographs containing labels denoting the presence of pneumonia, while another institution might have a focus on determining the presence of metastases in the lung. Training a single AI model utilizing all these data is not feasible with conventional federated learning (FL). This prompts us to propose an extension to the widespread FL process, namely flexible federated learning (FFL) for collaborative training on such data. Using 695,000 chest radiographs from five institutions from across the globe - each with differing labels - we demonstrate that having heterogeneously labeled datasets, FFL-based training leads to significant performance increase compared to conventional FL training, where only the uniformly annotated images are utilized. We believe that our proposed algorithm could accelerate the process of bringing collaborative training methods from research and simulation phase to the real-world applications in healthcare. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.13606v2-abstract-full').style.display = 'none'; document.getElementById('2211.13606v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in Nature Scientific Reports</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Sci Rep 13, 6046 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.03364">arXiv:2211.03364</a> <span> [<a href="https://arxiv.org/pdf/2211.03364">pdf</a>, <a href="https://arxiv.org/format/2211.03364">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Medical Diffusion: Denoising Diffusion Probabilistic Models for 3D Medical Image Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Mueller-Franzes%2C+G">Gustav Mueller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Haarburger%2C+C">Christoph Haarburger</a>, <a href="/search/cs?searchtype=author&query=Schulze-Hagen%2C+M">Maximilian Schulze-Hagen</a>, <a href="/search/cs?searchtype=author&query=Schad%2C+P">Philipp Schad</a>, <a href="/search/cs?searchtype=author&query=Engelhardt%2C+S">Sandy Engelhardt</a>, <a href="/search/cs?searchtype=author&query=Baessler%2C+B">Bettina Baessler</a>, <a 
href="/search/cs?searchtype=author&query=Foersch%2C+S">Sebastian Foersch</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.03364v7-abstract-short" style="display: inline;"> Recent advances in computer vision have shown promising results in image generation. Diffusion probabilistic models in particular have generated realistic images from textual input, as demonstrated by DALL-E 2, Imagen and Stable Diffusion. However, their use in medicine, where image data typically comprises three-dimensional volumes, has not been systematically evaluated. Synthetic images may play… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.03364v7-abstract-full').style.display = 'inline'; document.getElementById('2211.03364v7-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.03364v7-abstract-full" style="display: none;"> Recent advances in computer vision have shown promising results in image generation. Diffusion probabilistic models in particular have generated realistic images from textual input, as demonstrated by DALL-E 2, Imagen and Stable Diffusion. However, their use in medicine, where image data typically comprises three-dimensional volumes, has not been systematically evaluated. Synthetic images may play a crucial role in privacy preserving artificial intelligence and can also be used to augment small datasets. 
Here we show that diffusion probabilistic models can synthesize high quality medical imaging data, which we show for Magnetic Resonance Images (MRI) and Computed Tomography (CT) images. We provide quantitative measurements of their performance through a reader study with two medical experts who rated the quality of the synthesized images in three categories: Realistic image appearance, anatomical correctness and consistency between slices. Furthermore, we demonstrate that synthetic images can be used in a self-supervised pre-training and improve the performance of breast segmentation models when data is scarce (dice score 0.91 vs. 0.95 without vs. with synthetic data). The code is publicly available on GitHub: https://github.com/FirasGit/medicaldiffusion. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.03364v7-abstract-full').style.display = 'none'; document.getElementById('2211.03364v7-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.10033">arXiv:2206.10033</a> <span> [<a href="https://arxiv.org/pdf/2206.10033">pdf</a>, <a href="https://arxiv.org/format/2206.10033">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Test Time Transform Prediction for Open Set Histopathological Image Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Galdran%2C+A">Adrian Galdran</a>, <a href="/search/cs?searchtype=author&query=Hewitt%2C+K+J">Katherine J. Hewitt</a>, <a href="/search/cs?searchtype=author&query=Ghaffari%2C+N+L">Narmin L. Ghaffari</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob N. Kather</a>, <a href="/search/cs?searchtype=author&query=Carneiro%2C+G">Gustavo Carneiro</a>, <a href="/search/cs?searchtype=author&query=Ballester%2C+M+A+G">Miguel A. González Ballester</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.10033v2-abstract-short" style="display: inline;"> Tissue typology annotation in Whole Slide histological images is a complex and tedious, yet necessary task for the development of computational pathology models. We propose to address this problem by applying Open Set Recognition techniques to the task of jointly classifying tissue that belongs to a set of annotated classes, e.g. 
clinically relevant tissue categories, while rejecting in test time… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.10033v2-abstract-full').style.display = 'inline'; document.getElementById('2206.10033v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.10033v2-abstract-full" style="display: none;"> Tissue typology annotation in Whole Slide histological images is a complex and tedious, yet necessary task for the development of computational pathology models. We propose to address this problem by applying Open Set Recognition techniques to the task of jointly classifying tissue that belongs to a set of annotated classes, e.g. clinically relevant tissue categories, while rejecting in test time Open Set samples, i.e. images that belong to categories not present in the training set. To this end, we introduce a new approach for Open Set histopathological image recognition based on training a model to accurately identify image categories and simultaneously predict which data augmentation transform has been applied. In test time, we measure model confidence in predicting this transform, which we expect to be lower for images in the Open Set. We carry out comprehensive experiments in the context of colorectal cancer assessment from histological images, which provide evidence on the strengths of our approach to automatically identify samples from unknown categories. Code is released at https://github.com/agaldran/t3po . 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.10033v2-abstract-full').style.display = 'none'; document.getElementById('2206.10033v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to MICCAI 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.11439">arXiv:2111.11439</a> <span> [<a href="https://arxiv.org/pdf/2111.11439">pdf</a>, <a href="https://arxiv.org/format/2111.11439">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Image prediction of disease progression by style-based manifold extrapolation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Pedersoli%2C+F">Federico Pedersoli</a>, <a href="/search/cs?searchtype=author&query=Zimmermann%2C+M">Markus Zimmermann</a>, <a href="/search/cs?searchtype=author&query=Keil%2C+S">Sebastian Keil</a>, <a 
href="/search/cs?searchtype=author&query=Schulze-Hagen%2C+M">Maximilian Schulze-Hagen</a>, <a href="/search/cs?searchtype=author&query=Terwoelbeck%2C+M">Marc Terwoelbeck</a>, <a href="/search/cs?searchtype=author&query=Isfort%2C+P">Peter Isfort</a>, <a href="/search/cs?searchtype=author&query=Haarburger%2C+C">Christoph Haarburger</a>, <a href="/search/cs?searchtype=author&query=Kiessling%2C+F">Fabian Kiessling</a>, <a href="/search/cs?searchtype=author&query=Schulz%2C+V">Volkmar Schulz</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.11439v2-abstract-short" style="display: inline;"> Disease-modifying management aims to prevent deterioration and progression of the disease, not just relieve symptoms. Unfortunately, the development of necessary therapies is often hampered by the failure to recognize the presymptomatic disease and limited understanding of disease development. We present a generic solution for this problem by a methodology that allows the prediction of progression… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.11439v2-abstract-full').style.display = 'inline'; document.getElementById('2111.11439v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.11439v2-abstract-full" style="display: none;"> Disease-modifying management aims to prevent deterioration and progression of the disease, not just relieve symptoms. Unfortunately, the development of necessary therapies is often hampered by the failure to recognize the presymptomatic disease and limited understanding of disease development. 
We present a generic solution for this problem by a methodology that allows the prediction of progression risk and morphology in individuals using a latent extrapolation optimization approach. To this end, we combined a regularized generative adversarial network (GAN) and a latent nearest neighbor algorithm for joint optimization to generate plausible images of future time points. We evaluated our method on osteoarthritis (OA) data from a multi-center longitudinal study (the Osteoarthritis Initiative, OAI). With presymptomatic baseline data, our model is generative and significantly outperforms the end-to-end learning model in discriminating the progressive cohort. Two experiments were performed with seven experienced radiologists. When no synthetic follow-up radiographs were provided, our model performed better than all seven radiologists. In cases where the synthetic follow-ups generated by our model were available, the specificity and sensitivity of all readers in discriminating progressors increased from $72.3\%$ to $88.6\%$ and from $42.1\%$ to $51.6\%$, respectively. Our results open up a new possibility of using model-based morphology and risk prediction to make predictions about future disease occurrence, as demonstrated in the example of OA. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.11439v2-abstract-full').style.display = 'none'; document.getElementById('2111.11439v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. 
</p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul 
class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 
47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>