Search | arXiv e-print repository
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–50 of 56 results for author: <span class="mathjax">Reyes, M</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Reyes%2C+M">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Reyes, M"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Reyes%2C+M&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Reyes, M"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Reyes%2C+M&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Reyes%2C+M&start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Reyes%2C+M&start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.10727">arXiv:2501.10727</a> <span> [<a href="https://arxiv.org/pdf/2501.10727">pdf</a>, <a href="https://arxiv.org/format/2501.10727">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> In the Picture: Medical Imaging Datasets, Artifacts, and their Living Review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Jim%C3%A9nez-S%C3%A1nchez%2C+A">Amelia Jim茅nez-S谩nchez</a>, <a href="/search/cs?searchtype=author&query=Avlona%2C+N">Natalia-Rozalia Avlona</a>, <a href="/search/cs?searchtype=author&query=de+Boer%2C+S">Sarah de Boer</a>, <a href="/search/cs?searchtype=author&query=Campello%2C+V+M">V铆ctor M. 
Campello</a>, <a href="/search/cs?searchtype=author&query=Feragen%2C+A">Aasa Feragen</a>, <a href="/search/cs?searchtype=author&query=Ferrante%2C+E">Enzo Ferrante</a>, <a href="/search/cs?searchtype=author&query=Ganz%2C+M">Melanie Ganz</a>, <a href="/search/cs?searchtype=author&query=Gichoya%2C+J+W">Judy Wawira Gichoya</a>, <a href="/search/cs?searchtype=author&query=Gonz%C3%A1lez%2C+C">Camila Gonz谩lez</a>, <a href="/search/cs?searchtype=author&query=Groefsema%2C+S">Steff Groefsema</a>, <a href="/search/cs?searchtype=author&query=Hering%2C+A">Alessa Hering</a>, <a href="/search/cs?searchtype=author&query=Hulman%2C+A">Adam Hulman</a>, <a href="/search/cs?searchtype=author&query=Joskowicz%2C+L">Leo Joskowicz</a>, <a href="/search/cs?searchtype=author&query=Juodelyte%2C+D">Dovile Juodelyte</a>, <a href="/search/cs?searchtype=author&query=Kandemir%2C+M">Melih Kandemir</a>, <a href="/search/cs?searchtype=author&query=Kooi%2C+T">Thijs Kooi</a>, <a href="/search/cs?searchtype=author&query=L%C3%A9rida%2C+J+d+P">Jorge del Pozo L茅rida</a>, <a href="/search/cs?searchtype=author&query=Li%2C+L+Y">Livie Yumeng Li</a>, <a href="/search/cs?searchtype=author&query=Pacheco%2C+A">Andre Pacheco</a>, <a href="/search/cs?searchtype=author&query=R%C3%A4dsch%2C+T">Tim R盲dsch</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&query=Sourget%2C+T">Th茅o Sourget</a>, <a href="/search/cs?searchtype=author&query=van+Ginneken%2C+B">Bram van Ginneken</a>, <a href="/search/cs?searchtype=author&query=Wen%2C+D">David Wen</a>, <a href="/search/cs?searchtype=author&query=Weng%2C+N">Nina Weng</a> , et al. (4 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.10727v1-abstract-short" style="display: inline;"> Datasets play a critical role in medical imaging research, yet issues such as label quality, shortcuts, and metadata are often overlooked. This lack of attention may harm the generalizability of algorithms and, consequently, negatively impact patient outcomes. While existing medical imaging literature reviews mostly focus on machine learning (ML) methods, with only a few focusing on datasets for s… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.10727v1-abstract-full').style.display = 'inline'; document.getElementById('2501.10727v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.10727v1-abstract-full" style="display: none;"> Datasets play a critical role in medical imaging research, yet issues such as label quality, shortcuts, and metadata are often overlooked. This lack of attention may harm the generalizability of algorithms and, consequently, negatively impact patient outcomes. While existing medical imaging literature reviews mostly focus on machine learning (ML) methods, with only a few focusing on datasets for specific applications, these reviews remain static -- they are published once and not updated thereafter. This fails to account for emerging evidence, such as biases, shortcuts, and additional annotations that other researchers may contribute after the dataset is published. We refer to these newly discovered findings of datasets as research artifacts. 
   To address this gap, we propose a living review that continuously tracks public datasets and their associated research artifacts across multiple medical imaging applications. Our approach includes a framework for the living review to monitor data documentation artifacts, and an SQL database to visualize the citation relationships between research artifacts and datasets. Lastly, we discuss key considerations for creating medical imaging datasets, review best practices for data annotation, discuss the significance of shortcuts and demographic diversity, and emphasize the importance of managing datasets throughout their entire lifecycle. Our demo is publicly available at http://130.226.140.142.
   Submitted 18 January, 2025; originally announced January 2025.
   Comments: Manuscript under review

2. arXiv:2412.02177 [pdf, other] cs.CV cs.AI
   Anatomically-Grounded Fact Checking of Automated Chest X-ray Reports
   Authors: R. Mahmood, K. C. L. Wong, D. M. Reyes, N. D'Souza, L. Shi, J. Wu, P. Kaviani, M. Kalra, G. Wang, P. Yan, T. Syeda-Mahmood
   Abstract: With the emergence of large-scale vision-language models, realistic radiology reports may be generated using only medical images as input guided by simple prompts. However, their practical utility has been limited due to the factual errors in their description of findings.
   In this paper, we propose a novel model for explainable fact-checking that identifies errors in findings and their locations indicated through the reports. Specifically, we analyze the types of errors made by automated reporting methods and derive a new synthetic dataset of images paired with real and fake descriptions of findings and their locations from a ground truth dataset. A new multi-label cross-modal contrastive regression network is then trained on this dataset. We evaluate the resulting fact-checking model and its utility in correcting reports generated by several SOTA automated reporting tools on a variety of benchmark datasets, with results pointing to over 40% improvement in report quality through such error detection and correction.
   Submitted 3 December, 2024; originally announced December 2024.
   Report number: RPI12

3. arXiv:2412.01031 [pdf, other] cs.CL cs.AI cs.CV
   Evaluating Automated Radiology Report Quality through Fine-Grained Phrasal Grounding of Clinical Findings
   Authors: Razi Mahmood, Pingkun Yan, Diego Machado Reyes, Ge Wang, Mannudeep K. Kalra, Parisa Kaviani, Joy T. Wu, Tanveer Syeda-Mahmood
   Abstract: Several evaluation metrics have been developed recently to automatically assess the quality of generative AI reports for chest radiographs based only on textual information, using lexical, semantic, or clinical named entity recognition methods. In this paper, we develop a new method of report quality evaluation by first extracting fine-grained finding patterns capturing the location, laterality, and severity of a large number of clinical findings. We then perform phrasal grounding to localize their associated anatomical regions on chest radiograph images. The textual and visual measures are then combined to rate the quality of the generated reports. We present results that compare this evaluation metric with other textual metrics on a gold standard dataset derived from the MIMIC collection and show its robustness and sensitivity to factual errors.
   Submitted 7 December, 2024; v1 submitted 1 December, 2024; originally announced December 2024.

4. arXiv:2409.18611 [pdf, other] cs.LG cs.DB
   Differentially Private Non Parametric Copulas: Generating synthetic data with non parametric copulas under privacy guarantees
   Authors: Pablo A. Osorio-Marulanda, John Esteban Castro Ramirez, Mikel Hernández Jiménez, Nicolas Moreno Reyes, Gorka Epelde Unanue
   Abstract: Creation of synthetic data models has represented a significant advancement across diverse scientific fields, but this technology also brings important privacy considerations for users. This work focuses on enhancing a non-parametric copula-based synthetic data generation model, DPNPC, by incorporating Differential Privacy through an Enhanced Fourier Perturbation method. The model generates synthetic data for mixed tabular databases while preserving privacy. We compare DPNPC with three other models (PrivBayes, DP-Copula, and DP-Histogram) across three public datasets, evaluating privacy, utility, and execution time. DPNPC outperforms the others in modeling multivariate dependencies, maintaining privacy for small ε values, and reducing training times. However, limitations include the need to assess the model's performance with different encoding methods and to consider additional privacy attacks. Future research should address these areas to enhance privacy-preserving synthetic data generation. (An illustrative differential-privacy sketch follows this entry.)
   Submitted 27 September, 2024; originally announced September 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 5 figures, deciding 2025 conference to which to submit</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 62H05; 62G32 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6; H.2.8; G.3 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.11142">arXiv:2408.11142</a> <span> [<a href="https://arxiv.org/pdf/2408.11142">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> ISLES 2024: The first longitudinal multimodal multi-center real-world dataset in (sub-)acute stroke </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Riedel%2C+E+O">Evamaria O. Riedel</a>, <a href="/search/cs?searchtype=author&query=de+la+Rosa%2C+E">Ezequiel de la Rosa</a>, <a href="/search/cs?searchtype=author&query=Baran%2C+T+A">The Anh Baran</a>, <a href="/search/cs?searchtype=author&query=Petzsche%2C+M+H">Moritz Hernandez Petzsche</a>, <a href="/search/cs?searchtype=author&query=Baazaoui%2C+H">Hakim Baazaoui</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+K">Kaiyuan Yang</a>, <a href="/search/cs?searchtype=author&query=Robben%2C+D">David Robben</a>, <a href="/search/cs?searchtype=author&query=Seia%2C+J+O">Joaquin Oscar Seia</a>, <a href="/search/cs?searchtype=author&query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&query=Su%2C+R">Ruisheng Su</a>, <a href="/search/cs?searchtype=author&query=Zimmer%2C+C">Claus Zimmer</a>, <a href="/search/cs?searchtype=author&query=Boeckh-Behrens%2C+T">Tobias Boeckh-Behrens</a>, <a href="/search/cs?searchtype=author&query=Berndt%2C+M">Maria Berndt</a>, <a href="/search/cs?searchtype=author&query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&query=Wegener%2C+S">Susanne Wegener</a>, <a href="/search/cs?searchtype=author&query=Kirschke%2C+J+S">Jan S. Kirschke</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.11142v1-abstract-short" style="display: inline;"> Stroke remains a leading cause of global morbidity and mortality, placing a heavy socioeconomic burden. Over the past decade, advances in endovascular reperfusion therapy and the use of CT and MRI imaging for treatment guidance have significantly improved patient outcomes and are now standard in clinical practice. 
5. arXiv:2408.11142 [pdf] cs.CV
   ISLES 2024: The first longitudinal multimodal multi-center real-world dataset in (sub-)acute stroke
   Authors: Evamaria O. Riedel, Ezequiel de la Rosa, The Anh Baran, Moritz Hernandez Petzsche, Hakim Baazaoui, Kaiyuan Yang, David Robben, Joaquin Oscar Seia, Roland Wiest, Mauricio Reyes, Ruisheng Su, Claus Zimmer, Tobias Boeckh-Behrens, Maria Berndt, Bjoern Menze, Benedikt Wiestler, Susanne Wegener, Jan S. Kirschke
   Abstract: Stroke remains a leading cause of global morbidity and mortality, placing a heavy socioeconomic burden. Over the past decade, advances in endovascular reperfusion therapy and the use of CT and MRI imaging for treatment guidance have significantly improved patient outcomes and are now standard in clinical practice. To develop machine learning algorithms that can extract meaningful and reproducible models of brain function for both clinical and research purposes from stroke images - particularly for lesion identification, brain health quantification, and prognosis - large, diverse, and well-annotated public datasets are essential. While only a few datasets with (sub-)acute stroke data were previously available, several large, high-quality datasets have recently been made publicly accessible. However, these existing datasets include only MRI data. In contrast, our dataset is the first to offer comprehensive longitudinal stroke data, including acute CT imaging with angiography and perfusion, follow-up MRI at 2-9 days, as well as acute and longitudinal clinical data up to a three-month outcome. The dataset includes a training dataset of n = 150 and a test dataset of n = 100 scans. Training data is publicly available, while test data will be used exclusively for model validation. We are making this dataset available as part of the 2024 edition of the Ischemic Stroke Lesion Segmentation (ISLES) challenge (https://www.isles-challenge.org/), which continuously aims to establish benchmark methods for acute and sub-acute ischemic stroke lesion segmentation, aiding in creating open stroke imaging datasets and evaluating cutting-edge image processing algorithms.
   Submitted 20 August, 2024; originally announced August 2024.
6. arXiv:2408.10966 [pdf, other] eess.IV cs.CV
   ISLES'24: Improving final infarct prediction in ischemic stroke using multimodal imaging and clinical data
   Authors: Ezequiel de la Rosa, Ruisheng Su, Mauricio Reyes, Roland Wiest, Evamaria O. Riedel, Florian Kofler, Kaiyuan Yang, Hakim Baazaoui, David Robben, Susanne Wegener, Jan S. Kirschke, Benedikt Wiestler, Bjoern Menze
   Abstract: Accurate estimation of core (irreversibly damaged tissue) and penumbra (salvageable tissue) volumes is essential for ischemic stroke treatment decisions. Perfusion CT, the clinical standard, estimates these volumes but is affected by variations in deconvolution algorithms, implementations, and thresholds. Core tissue expands over time, with growth rates influenced by thrombus location, collateral circulation, and inherent patient-specific factors. Understanding this tissue growth is crucial for determining the need to transfer patients to comprehensive stroke centers, predicting the benefits of additional reperfusion attempts during mechanical thrombectomy, and forecasting final clinical outcomes.
   This work presents the ISLES'24 challenge, which addresses final post-treatment stroke infarct prediction from pre-interventional acute stroke imaging and clinical data. ISLES'24 establishes a unique 360-degree setting where all feasibly accessible clinical data are available for participants, including full CT acute stroke imaging, sub-acute follow-up MRI, and clinical tabular data. The contributions of this work are two-fold: first, we introduce a standardized benchmarking of final stroke infarct segmentation algorithms through the ISLES'24 challenge; second, we provide insights into infarct segmentation using multimodal imaging and clinical data strategies by identifying outperforming methods on a finely curated dataset. The outputs of this challenge are anticipated to enhance clinical decision-making and improve patient outcome predictions. All ISLES'24 materials, including data, performance evaluation scripts, and leading algorithmic strategies, are available to the research community at https://isles-24.grand-challenge.org/.
   Submitted 20 August, 2024; originally announced August 2024.

7. arXiv:2406.17032 [pdf, other] cs.CV
   DWARF: Disease-weighted network for attention map refinement
   Authors: Haozhe Luo, Aurélie Pahud de Mortanges, Oana Inel, Abraham Bernstein, Mauricio Reyes
   Abstract: The interpretability of deep learning is crucial for evaluating the reliability of medical imaging models and reducing the risks of inaccurate patient recommendations. This study addresses the "human out of the loop" and "trustworthiness" issues in medical image analysis by integrating medical professionals into the interpretability process.
   We propose a disease-weighted attention map refinement network (DWARF) that leverages expert feedback to enhance model relevance and accuracy. Our method employs cyclic training to iteratively improve diagnostic performance, generating precise and interpretable feature maps. Experimental results demonstrate significant improvements in interpretability and diagnostic accuracy across multiple medical imaging datasets. This approach fosters effective collaboration between AI systems and healthcare professionals, ultimately aiming to improve patient outcomes.
   Submitted 28 June, 2024; v1 submitted 24 June, 2024; originally announced June 2024.

8. arXiv:2404.09277 [pdf, other] cs.CV
   SyntStereo2Real: Edge-Aware GAN for Remote Sensing Image-to-Image Translation while Maintaining Stereo Constraint
   Authors: Vasudha Venkatesan, Daniel Panangian, Mario Fuentes Reyes, Ksenia Bittner
   Abstract: In the field of remote sensing, the scarcity of stereo-matched data, and particularly the lack of accurate ground truth, often hinders the training of deep neural networks. The use of synthetically generated images as an alternative alleviates this problem but suffers from the problem of domain generalization.
   Unifying the capabilities of image-to-image translation and stereo-matching presents an effective solution to address the issue of domain generalization. Current methods involve combining two networks, an unpaired image-to-image translation network and a stereo-matching network, while jointly optimizing them. We propose an edge-aware GAN-based network that effectively tackles both tasks simultaneously. We obtain edge maps of the input images from the Sobel operator and use them as an additional input to the encoder in the generator to enforce geometric consistency during translation. We additionally include a warping loss calculated from the translated images to maintain the stereo consistency. We demonstrate that our model produces results qualitatively and quantitatively superior to those of existing models, and that its applicability extends to diverse domains, including autonomous driving. (An illustrative sketch of the Sobel edge-extraction step follows this entry.)
   Submitted 14 April, 2024; originally announced April 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to IEEE Conference on Computer Vision and Pattern Recognition Workshop (CVPRW) EarthVision</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.19425">arXiv:2403.19425</a> <span> [<a href="https://arxiv.org/pdf/2403.19425">pdf</a>, <a href="https://arxiv.org/ps/2403.19425">ps</a>, <a href="https://arxiv.org/format/2403.19425">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Robust Ensemble Algorithm for Ischemic Stroke Lesion Segmentation: Generalizability and Clinical Utility Beyond the ISLES Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=de+la+Rosa%2C+E">Ezequiel de la Rosa</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&query=Liew%2C+S">Sook-Lei Liew</a>, <a href="/search/cs?searchtype=author&query=Hutton%2C+A">Alexandre Hutton</a>, <a href="/search/cs?searchtype=author&query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&query=Kaesmacher%2C+J">Johannes Kaesmacher</a>, <a href="/search/cs?searchtype=author&query=Hanning%2C+U">Uta Hanning</a>, <a href="/search/cs?searchtype=author&query=Hakim%2C+A">Arsany Hakim</a>, <a href="/search/cs?searchtype=author&query=Zubal%2C+R">Richard Zubal</a>, <a href="/search/cs?searchtype=author&query=Valenzuela%2C+W">Waldo Valenzuela</a>, <a href="/search/cs?searchtype=author&query=Robben%2C+D">David Robben</a>, <a href="/search/cs?searchtype=author&query=Sima%2C+D+M">Diana M. Sima</a>, <a href="/search/cs?searchtype=author&query=Anania%2C+V">Vincenzo Anania</a>, <a href="/search/cs?searchtype=author&query=Brys%2C+A">Arne Brys</a>, <a href="/search/cs?searchtype=author&query=Meakin%2C+J+A">James A. Meakin</a>, <a href="/search/cs?searchtype=author&query=Mickan%2C+A">Anne Mickan</a>, <a href="/search/cs?searchtype=author&query=Broocks%2C+G">Gabriel Broocks</a>, <a href="/search/cs?searchtype=author&query=Heitkamp%2C+C">Christian Heitkamp</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+S">Shengbo Gao</a>, <a href="/search/cs?searchtype=author&query=Liang%2C+K">Kongming Liang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+Z">Ziji Zhang</a>, <a href="/search/cs?searchtype=author&query=Siddiquee%2C+M+M+R">Md Mahfuzur Rahman Siddiquee</a>, <a href="/search/cs?searchtype=author&query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/cs?searchtype=author&query=Ashtari%2C+P">Pooya Ashtari</a>, <a href="/search/cs?searchtype=author&query=Van+Huffel%2C+S">Sabine Van Huffel</a> , et al. (33 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.19425v2-abstract-short" style="display: inline;"> Diffusion-weighted MRI (DWI) is essential for stroke diagnosis, treatment decisions, and prognosis. 
9. arXiv:2403.19425 [pdf, ps, other] eess.IV cs.CV
   A Robust Ensemble Algorithm for Ischemic Stroke Lesion Segmentation: Generalizability and Clinical Utility Beyond the ISLES Challenge
   Authors: Ezequiel de la Rosa, Mauricio Reyes, Sook-Lei Liew, Alexandre Hutton, Roland Wiest, Johannes Kaesmacher, Uta Hanning, Arsany Hakim, Richard Zubal, Waldo Valenzuela, David Robben, Diana M. Sima, Vincenzo Anania, Arne Brys, James A. Meakin, Anne Mickan, Gabriel Broocks, Christian Heitkamp, Shengbo Gao, Kongming Liang, Ziji Zhang, Md Mahfuzur Rahman Siddiquee, Andriy Myronenko, Pooya Ashtari, Sabine Van Huffel, et al. (33 additional authors not shown)
   Abstract: Diffusion-weighted MRI (DWI) is essential for stroke diagnosis, treatment decisions, and prognosis. However, image and disease variability hinder the development of generalizable AI algorithms with clinical value. We address this gap by presenting a novel ensemble algorithm derived from the 2022 Ischemic Stroke Lesion Segmentation (ISLES) challenge. ISLES'22 provided 400 patient scans with ischemic stroke from various medical centers, facilitating the development of a wide range of cutting-edge segmentation algorithms by the research community. Through collaboration with leading teams, we combined top-performing algorithms into an ensemble model that overcomes the limitations of individual solutions. Our ensemble model achieved superior ischemic lesion detection and segmentation accuracy on our internal test set compared to individual algorithms. This accuracy generalized well across diverse image and disease variables. Furthermore, the model excelled in extracting clinical biomarkers. Notably, in a Turing-like test, neuroradiologists consistently preferred the algorithm's segmentations over manual expert efforts, highlighting increased comprehensiveness and precision. Validation using a real-world external dataset (N=1686) confirmed the model's generalizability. The algorithm's outputs also demonstrated strong correlations with clinical scores (admission NIHSS and 90-day mRS) on par with or exceeding expert-derived results, underlining its clinical relevance. This study offers two key findings. First, we present an ensemble algorithm (https://github.com/Tabrisrei/ISLES22_Ensemble) that detects and segments ischemic stroke lesions on DWI across diverse scenarios on par with expert (neuro)radiologists. Second, we show the potential for biomedical challenge outputs to extend beyond the challenge's initial objectives, demonstrating their real-world clinical applicability. (A toy mask-fusion sketch follows this entry.)
   Submitted 3 April, 2024; v1 submitted 28 March, 2024; originally announced March 2024.
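The listing says top-performing ISLES'22 algorithms were combined into an ensemble but does not state the combination rule; the linked repository holds the actual method. Purely to illustrate the general idea, here is a toy majority-vote fusion of binary lesion masks from several models. Everything here (the voting rule, names, shapes) is an assumption, not the paper's algorithm.

```python
import numpy as np

def majority_vote(masks: list[np.ndarray], threshold: float = 0.5) -> np.ndarray:
    """Fuse binary segmentation masks: a voxel is labeled lesion when at
    least `threshold` of the member models marked it as lesion."""
    stacked = np.stack(masks).astype(float)   # (n_models, *volume_shape)
    return (stacked.mean(axis=0) >= threshold).astype(np.uint8)

# Toy example with three 'model' outputs on a tiny 4x4 slice.
rng = np.random.default_rng(1)
preds = [(rng.random((4, 4)) > 0.5).astype(np.uint8) for _ in range(3)]
fused = majority_vote(preds)   # 1 where at least 2 of 3 models agree
```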
10. arXiv:2402.00137 [pdf, other] cs.LG cs.CV
    Multimodal Neurodegenerative Disease Subtyping Explained by ChatGPT
    Authors: Diego Machado Reyes, Hanqing Chao, Juergen Hahn, Li Shen, Pingkun Yan
    Abstract: Alzheimer's disease (AD) is the most prevalent neurodegenerative disease; yet its currently available treatments are limited to stopping disease progression. Moreover, the effectiveness of these treatments is not guaranteed due to the heterogeneity of the disease. Therefore, it is essential to be able to identify the disease subtypes at a very early stage. Current data-driven approaches are able to classify the subtypes at later stages of AD or related disorders, but struggle when predicting at the asymptomatic or prodromal stage. Moreover, most existing models either lack explainability behind the classification or only use a single modality for the assessment, limiting the scope of the analysis. Thus, we propose a multimodal framework that uses early-stage indicators such as imaging, genetics, and clinical assessments to classify AD patients into subtypes at early stages. Similarly, we build prompts and use large language models, such as ChatGPT, to interpret the findings of our model. In our framework, we propose a tri-modal co-attention mechanism (Tri-COAT) to explicitly learn the cross-modal feature associations. Our proposed model outperforms baseline models and provides insight into key cross-modal feature associations supported by known biological mechanisms. (A minimal co-attention sketch follows this entry.)
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.00137v1-abstract-full').style.display = 'none'; document.getElementById('2402.00137v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.14256">arXiv:2401.14256</a> <span> [<a href="https://arxiv.org/pdf/2401.14256">pdf</a>, <a href="https://arxiv.org/format/2401.14256">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Producing Plankton Classifiers that are Robust to Dataset Shift </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+C">Cheng Chen</a>, <a href="/search/cs?searchtype=author&query=Kyathanahally%2C+S">Sreenath Kyathanahally</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Marta Reyes</a>, <a href="/search/cs?searchtype=author&query=Merkli%2C+S">Stefanie Merkli</a>, <a href="/search/cs?searchtype=author&query=Merz%2C+E">Ewa Merz</a>, <a href="/search/cs?searchtype=author&query=Francazi%2C+E">Emanuele Francazi</a>, <a href="/search/cs?searchtype=author&query=Hoege%2C+M">Marvin Hoege</a>, <a href="/search/cs?searchtype=author&query=Pomati%2C+F">Francesco Pomati</a>, <a href="/search/cs?searchtype=author&query=Baity-Jesi%2C+M">Marco Baity-Jesi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.14256v1-abstract-short" style="display: inline;"> Modern plankton high-throughput monitoring relies on deep learning classifiers for species recognition in water ecosystems. Despite satisfactory nominal performances, a significant challenge arises from Dataset Shift, which causes performances to drop during deployment. In our study, we integrate the ZooLake dataset with manually-annotated images from 10 independent days of deployment, serving as… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.14256v1-abstract-full').style.display = 'inline'; document.getElementById('2401.14256v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.14256v1-abstract-full" style="display: none;"> Modern plankton high-throughput monitoring relies on deep learning classifiers for species recognition in water ecosystems. Despite satisfactory nominal performances, a significant challenge arises from Dataset Shift, which causes performances to drop during deployment. In our study, we integrate the ZooLake dataset with manually-annotated images from 10 independent days of deployment, serving as test cells to benchmark Out-Of-Dataset (OOD) performances. Our analysis reveals instances where classifiers, initially performing well in In-Dataset conditions, encounter notable failures in practical scenarios. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.14256">arXiv:2401.14256</a> <span> [<a href="https://arxiv.org/pdf/2401.14256">pdf</a>, <a href="https://arxiv.org/format/2401.14256">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Producing Plankton Classifiers that are Robust to Dataset Shift </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+C">Cheng Chen</a>, <a href="/search/cs?searchtype=author&query=Kyathanahally%2C+S">Sreenath Kyathanahally</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Marta Reyes</a>, <a href="/search/cs?searchtype=author&query=Merkli%2C+S">Stefanie Merkli</a>, <a href="/search/cs?searchtype=author&query=Merz%2C+E">Ewa Merz</a>, <a href="/search/cs?searchtype=author&query=Francazi%2C+E">Emanuele Francazi</a>, <a href="/search/cs?searchtype=author&query=Hoege%2C+M">Marvin Hoege</a>, <a href="/search/cs?searchtype=author&query=Pomati%2C+F">Francesco Pomati</a>, <a href="/search/cs?searchtype=author&query=Baity-Jesi%2C+M">Marco Baity-Jesi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2401.14256v1-abstract-full"> Modern plankton high-throughput monitoring relies on deep learning classifiers for species recognition in water ecosystems. Despite satisfactory nominal performances, a significant challenge arises from Dataset Shift, which causes performances to drop during deployment. In our study, we integrate the ZooLake dataset with manually-annotated images from 10 independent days of deployment, serving as test cells to benchmark Out-Of-Dataset (OOD) performances. Our analysis reveals instances where classifiers, initially performing well in In-Dataset conditions, encounter notable failures in practical scenarios. For example, a MobileNet with a 92% nominal test accuracy shows a 77% OOD accuracy. We systematically investigate conditions leading to OOD performance drops and propose a preemptive assessment method to identify potential pitfalls when classifying new data, and pinpoint features in OOD images that adversely impact classification. We present a three-step pipeline: (i) identifying OOD degradation compared to nominal test performance, (ii) conducting a diagnostic analysis of degradation causes, and (iii) providing solutions. We find that ensembles of BEiT vision transformers, with targeted augmentations addressing OOD robustness, geometric ensembling, and rotation-based test-time augmentation, constitute the most robust model, which we call BEsT model. It achieves an 83% OOD accuracy, with errors concentrated on container classes. Moreover, it exhibits lower sensitivity to dataset shift and reproduces the plankton abundances well. Our proposed pipeline is applicable to generic plankton classifiers, contingent on the availability of suitable test cells. By identifying critical shortcomings and offering practical procedures to fortify models against dataset shift, our study contributes to the development of more reliable plankton classification technologies. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li>
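<p class="is-size-7">The robust "BEsT" recipe combines model ensembling, geometric averaging and rotation-based test-time augmentation. A minimal NumPy sketch of that inference step, assuming each model is a callable mapping an image array to class logits; the callable interface and shapes are illustrative assumptions, not the paper's code.</p> <pre><code class="language-python">
import numpy as np

def softmax(z, axis=-1):
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

def predict_tta_geometric(models, image, rotations=(0, 1, 2, 3)):
    """Combine class probabilities over models and 90-degree rotations with a
    geometric mean, i.e. an arithmetic mean in log space."""
    log_probs = []
    for model in models:
        for k in rotations:
            view = np.rot90(image, k, axes=(0, 1))   # rotation-based TTA
            p = softmax(model(view))                 # model returns class logits
            log_probs.append(np.log(p + 1e-12))
    return int(np.argmax(np.mean(log_probs, axis=0)))
</code></pre>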
href="/search/cs?searchtype=author&query=Jin%2C+Y">Yuan Jin</a>, <a href="/search/cs?searchtype=author&query=Ambigapathy%2C+N">Narmada Ambigapathy</a>, <a href="/search/cs?searchtype=author&query=Nasca%2C+E">Enrico Nasca</a>, <a href="/search/cs?searchtype=author&query=Solak%2C+N">Naida Solak</a>, <a href="/search/cs?searchtype=author&query=Melito%2C+G+M">Gian Marco Melito</a>, <a href="/search/cs?searchtype=author&query=Vu%2C+V+D">Viet Duc Vu</a>, <a href="/search/cs?searchtype=author&query=Memon%2C+A+R">Afaque R. Memon</a>, <a href="/search/cs?searchtype=author&query=Schlachta%2C+C">Christopher Schlachta</a>, <a href="/search/cs?searchtype=author&query=De+Ribaupierre%2C+S">Sandrine De Ribaupierre</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+R">Rajnikant Patel</a>, <a href="/search/cs?searchtype=author&query=Eagleson%2C+R">Roy Eagleson</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+X">Xiaojun Chen</a> , et al. (132 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.16139v5-abstract-short" style="display: inline;"> Prior to the deep learning era, shape was commonly used to describe the objects. Nowadays, state-of-the-art (SOTA) algorithms in medical imaging are predominantly diverging from computer vision, where voxel grids, meshes, point clouds, and implicit surface models are used. This is seen from numerous shape-related publications in premier vision conferences as well as the growing popularity of Shape… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.16139v5-abstract-full').style.display = 'inline'; document.getElementById('2308.16139v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.16139v5-abstract-full" style="display: none;"> Prior to the deep learning era, shape was commonly used to describe the objects. Nowadays, state-of-the-art (SOTA) algorithms in medical imaging are predominantly diverging from computer vision, where voxel grids, meshes, point clouds, and implicit surface models are used. This is seen from numerous shape-related publications in premier vision conferences as well as the growing popularity of ShapeNet (about 51,300 models) and Princeton ModelNet (127,915 models). For the medical domain, we present a large collection of anatomical shapes (e.g., bones, organs, vessels) and 3D models of surgical instrument, called MedShapeNet, created to facilitate the translation of data-driven vision algorithms to medical applications and to adapt SOTA vision algorithms to medical problems. As a unique feature, we directly model the majority of shapes on the imaging data of real patients. As of today, MedShapeNet includes 23 dataset with more than 100,000 shapes that are paired with annotations (ground truth). Our data is freely accessible via a web interface and a Python application programming interface (API) and can be used for discriminative, reconstructive, and variational benchmarks as well as various applications in virtual, augmented, or mixed reality, and 3D printing. Exemplary, we present use cases in the fields of classification of brain tumors, facial and skull reconstructions, multi-class anatomy completion, education, and 3D printing. In future, we will extend the data and improve the interfaces. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.01925">arXiv:2305.01925</a> <span> [<a href="https://arxiv.org/pdf/2305.01925">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> INDCOR White Paper 2: Interactive Narrative Design for Representing Complexity </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Perkis%2C+A">Andrew Perkis</a>, <a href="/search/cs?searchtype=author&query=Bellini%2C+M">Mattia Bellini</a>, <a href="/search/cs?searchtype=author&query=Nisi%2C+V">Valentina Nisi</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M+C">Maria Cecilia Reyes</a>, <a href="/search/cs?searchtype=author&query=Sylla%2C+C">Cristina Sylla</a>, <a href="/search/cs?searchtype=author&query=Santa%2C+M">Mijalche Santa</a>, <a href="/search/cs?searchtype=author&query=Zaluczkowska%2C+A">Anna Zaluczkowska</a>, <a href="/search/cs?searchtype=author&query=Irshad%2C+S">Shafaq Irshad</a>, <a href="/search/cs?searchtype=author&query=Bakk%2C+%C3%81">Ágnes Bakk</a>, <a href="/search/cs?searchtype=author&query=Barnab%C3%A9%2C+F">Fanny Barnabé</a>, <a href="/search/cs?searchtype=author&query=Barnard%2C+D">Daniel Barnard</a>, <a href="/search/cs?searchtype=author&query=Boukhelifa%2C+N">Nadia Boukhelifa</a>, <a href="/search/cs?searchtype=author&query=Klungre%2C+%C3%98+S">Øyvind Sørdal Klungre</a>, <a href="/search/cs?searchtype=author&query=Koenitz%2C+H">Hartmut Koenitz</a>, <a href="/search/cs?searchtype=author&query=Lombardo%2C+V">Vincenzo Lombardo</a>, <a href="/search/cs?searchtype=author&query=Elahdhari%2C+M+P">Mirjam Palosaari Elahdhari</a>, <a href="/search/cs?searchtype=author&query=Prandi%2C+C">Catia Prandi</a>, <a href="/search/cs?searchtype=author&query=Rettberg%2C+S">Scott Rettberg</a>, <a href="/search/cs?searchtype=author&query=Serbanescu%2C+A">Anca Serbanescu</a>, <a href="/search/cs?searchtype=author&query=Sousa%2C+S">Sonia Sousa</a>, <a href="/search/cs?searchtype=author&query=Stefaneas%2C+P">Petros Stefaneas</a>, <a href="/search/cs?searchtype=author&query=Uzunov%2C+D">Dimitar Uzunov</a>, <a href="/search/cs?searchtype=author&query=Vosmeer%2C+M">Mirjam Vosmeer</a>, <a href="/search/cs?searchtype=author&query=Wardaszko%2C+M">Marcin Wardaszko</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2305.01925v5-abstract-full"> This white paper was written by the members of the Work Group focusing on design practices of the COST Action 18230 - Interactive Narrative Design for Complexity Representation (INDCOR, WG1). It presents an overview of Interactive Digital Narratives (IDNs) design for complexity representations through IDN workflows and methodologies, IDN authoring tools and applications. It provides definitions of the central elements of the IDN alongside its best practices, designs and methods. Finally, it describes complexity as a feature of IDN, with related examples. In summary, this white paper serves as an orienting map for the field of IDN design, showing where the field stands in the contemporary panorama while charting the grounds of its promising futures. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages. This white paper was produced by members of the COST Action 18230 - Interactive Narrative Design for Complexity Representation (INDCOR - https://indcor.eu)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.01790">arXiv:2302.01790</a> <span> [<a href="https://arxiv.org/pdf/2302.01790">pdf</a>, <a href="https://arxiv.org/format/2302.01790">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41592-023-02150-0">10.1038/s41592-023-02150-0 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Understanding metric-related pitfalls in image analysis validation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Reinke%2C+A">Annika Reinke</a>, <a href="/search/cs?searchtype=author&query=Tizabi%2C+M+D">Minu D. Tizabi</a>, <a href="/search/cs?searchtype=author&query=Baumgartner%2C+M">Michael Baumgartner</a>, <a href="/search/cs?searchtype=author&query=Eisenmann%2C+M">Matthias Eisenmann</a>, <a href="/search/cs?searchtype=author&query=Heckmann-N%C3%B6tzel%2C+D">Doreen Heckmann-Nötzel</a>, <a href="/search/cs?searchtype=author&query=Kavur%2C+A+E">A. Emre Kavur</a>, <a href="/search/cs?searchtype=author&query=R%C3%A4dsch%2C+T">Tim Rädsch</a>, <a href="/search/cs?searchtype=author&query=Sudre%2C+C+H">Carole H. Sudre</a>, <a href="/search/cs?searchtype=author&query=Acion%2C+L">Laura Acion</a>, <a href="/search/cs?searchtype=author&query=Antonelli%2C+M">Michela Antonelli</a>, <a href="/search/cs?searchtype=author&query=Arbel%2C+T">Tal Arbel</a>, <a href="/search/cs?searchtype=author&query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/cs?searchtype=author&query=Benis%2C+A">Arriel Benis</a>, <a href="/search/cs?searchtype=author&query=Blaschko%2C+M">Matthew Blaschko</a>, <a href="/search/cs?searchtype=author&query=Buettner%2C+F">Florian Buettner</a>, <a href="/search/cs?searchtype=author&query=Cardoso%2C+M+J">M. Jorge Cardoso</a>, <a href="/search/cs?searchtype=author&query=Cheplygina%2C+V">Veronika Cheplygina</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+J">Jianxu Chen</a>, <a href="/search/cs?searchtype=author&query=Christodoulou%2C+E">Evangelia Christodoulou</a>, <a href="/search/cs?searchtype=author&query=Cimini%2C+B+A">Beth A. Cimini</a>, <a href="/search/cs?searchtype=author&query=Collins%2C+G+S">Gary S. Collins</a>, <a href="/search/cs?searchtype=author&query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/cs?searchtype=author&query=Ferrer%2C+L">Luciana Ferrer</a>, <a href="/search/cs?searchtype=author&query=Galdran%2C+A">Adrian Galdran</a>, <a href="/search/cs?searchtype=author&query=van+Ginneken%2C+B">Bram van Ginneken</a> , et al. (53 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2302.01790v4-abstract-full"> Validation metrics are key for the reliable tracking of scientific progress and for bridging the current chasm between artificial intelligence (AI) research and its translation into practice. However, increasing evidence shows that particularly in image analysis, metrics are often chosen inadequately in relation to the underlying research problem. This could be attributed to a lack of accessibility of metric-related knowledge: While taking into account the individual strengths, weaknesses, and limitations of validation metrics is a critical prerequisite to making educated choices, the relevant knowledge is currently scattered and poorly accessible to individual researchers. Based on a multi-stage Delphi process conducted by a multidisciplinary expert consortium as well as extensive community feedback, the present work provides the first reliable and comprehensive common point of access to information on pitfalls related to validation metrics in image analysis. Focusing on biomedical image analysis but with the potential of transfer to other fields, the addressed pitfalls generalize across application domains and are categorized according to a newly created, domain-agnostic taxonomy. To facilitate comprehension, illustrations and specific examples accompany each pitfall. As a structured body of information accessible to researchers of all levels of expertise, this work enhances global comprehension of a key topic in image analysis validation. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Shared first authors: Annika Reinke and Minu D. Tizabi; shared senior authors: Lena Maier-Hein and Paul F. Jäger. Published in Nature Methods. arXiv admin note: text overlap with arXiv:2206.01653</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nature Methods, 1-13 (2024) </p> </li>
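<p class="is-size-7">A classic instance of the kind of pitfall this paper catalogues: on highly imbalanced segmentation targets, pixel accuracy looks excellent while overlap metrics reveal total failure. A self-contained NumPy illustration, constructed for this listing rather than taken from the paper.</p> <pre><code class="language-python">
import numpy as np

def dice(pred, gt):
    inter = np.logical_and(pred, gt).sum()
    return 2 * inter / (pred.sum() + gt.sum())

def iou(pred, gt):
    inter = np.logical_and(pred, gt).sum()
    return inter / np.logical_or(pred, gt).sum()

# A 100x100 image with a tiny 2x2 lesion: predicting "all background"
# still yields 99.96% pixel accuracy, while Dice and IoU are both 0.
gt = np.zeros((100, 100), dtype=bool)
gt[:2, :2] = True
pred = np.zeros_like(gt)

print("accuracy:", (pred == gt).mean())   # 0.9996
print("dice:", dice(pred, gt))            # 0.0
print("iou:", iou(pred, gt))              # 0.0
</code></pre>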
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.05545">arXiv:2208.05545</a> <span> [<a href="https://arxiv.org/pdf/2208.05545">pdf</a>, <a href="https://arxiv.org/format/2208.05545">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> The Moral Foundations Reddit Corpus </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Trager%2C+J">Jackson Trager</a>, <a href="/search/cs?searchtype=author&query=Ziabari%2C+A+S">Alireza S. Ziabari</a>, <a href="/search/cs?searchtype=author&query=Davani%2C+A+M">Aida Mostafazadeh Davani</a>, <a href="/search/cs?searchtype=author&query=Golazizian%2C+P">Preni Golazizian</a>, <a href="/search/cs?searchtype=author&query=Karimi-Malekabadi%2C+F">Farzan Karimi-Malekabadi</a>, <a href="/search/cs?searchtype=author&query=Omrani%2C+A">Ali Omrani</a>, <a href="/search/cs?searchtype=author&query=Li%2C+Z">Zhihe Li</a>, <a href="/search/cs?searchtype=author&query=Kennedy%2C+B">Brendan Kennedy</a>, <a href="/search/cs?searchtype=author&query=Reimer%2C+N+K">Nils Karl Reimer</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Melissa Reyes</a>, <a href="/search/cs?searchtype=author&query=Cheng%2C+K">Kelsey Cheng</a>, <a href="/search/cs?searchtype=author&query=Wei%2C+M">Mellow Wei</a>, <a href="/search/cs?searchtype=author&query=Merrifield%2C+C">Christina Merrifield</a>, <a href="/search/cs?searchtype=author&query=Khosravi%2C+A">Arta Khosravi</a>, <a href="/search/cs?searchtype=author&query=Alvarez%2C+E">Evans Alvarez</a>, <a href="/search/cs?searchtype=author&query=Dehghani%2C+M">Morteza Dehghani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2208.05545v2-abstract-full"> Moral framing and sentiment can affect a variety of online and offline behaviors, including donation, pro-environmental action, political engagement, and even participation in violent protests. Various computational methods in Natural Language Processing (NLP) have been used to detect moral sentiment from textual data, but in order to achieve better performances in such subjective tasks, large sets of hand-annotated training data are needed. Previous corpora annotated for moral sentiment have proven valuable, and have generated new insights both within NLP and across the social sciences, but have been limited to Twitter. To facilitate improving our understanding of the role of moral rhetoric, we present the Moral Foundations Reddit Corpus, a collection of 16,123 Reddit comments that have been curated from 12 distinct subreddits, hand-annotated by at least three trained annotators for 8 categories of moral sentiment (i.e., Care, Proportionality, Equality, Purity, Authority, Loyalty, Thin Morality, Implicit/Explicit Morality) based on the updated Moral Foundations Theory (MFT) framework. We use a range of methodologies to provide baseline moral-sentiment classification results for this new corpus, e.g., cross-domain classification and knowledge transfer. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> </li>
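<p class="is-size-7">The corpus supports multi-label moral-sentiment classification over the 8 categories. One plausible baseline in the spirit of those reported, though not necessarily the authors' exact setup, is TF-IDF features with one-vs-rest logistic regression; the two toy comments below are invented stand-ins for the real data.</p> <pre><code class="language-python">
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import make_pipeline

LABELS = ["Care", "Proportionality", "Equality", "Purity",
          "Authority", "Loyalty", "Thin Morality", "Implicit/Explicit Morality"]

# Toy stand-in data; the real corpus provides 16,123 annotated comments.
texts = ["we must protect the vulnerable", "rules are rules, obey the law"]
y = [[1, 0, 0, 0, 0, 0, 0, 0],   # Care
     [0, 0, 0, 0, 1, 0, 0, 0]]   # Authority

clf = make_pipeline(
    TfidfVectorizer(min_df=1),
    OneVsRestClassifier(LogisticRegression(max_iter=1000)),
)
clf.fit(texts, y)  # labels absent from the toy data get constant predictors
print(clf.predict(["obey the rules of the group"]))
</code></pre>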
href="/search/cs?searchtype=author&query=Hutton%2C+A">Alexander Hutton</a>, <a href="/search/cs?searchtype=author&query=Friedrich%2C+T">Tassilo Friedrich</a>, <a href="/search/cs?searchtype=author&query=Zarth%2C+T">Teresa Zarth</a>, <a href="/search/cs?searchtype=author&query=B%C3%BCrkle%2C+J">Johannes B眉rkle</a>, <a href="/search/cs?searchtype=author&query=Baran%2C+T+A">The Anh Baran</a>, <a href="/search/cs?searchtype=author&query=Menze%2C+B">Bjoern Menze</a>, <a href="/search/cs?searchtype=author&query=Broocks%2C+G">Gabriel Broocks</a>, <a href="/search/cs?searchtype=author&query=Meyer%2C+L">Lukas Meyer</a>, <a href="/search/cs?searchtype=author&query=Zimmer%2C+C">Claus Zimmer</a>, <a href="/search/cs?searchtype=author&query=Boeckh-Behrens%2C+T">Tobias Boeckh-Behrens</a>, <a href="/search/cs?searchtype=author&query=Berndt%2C+M">Maria Berndt</a>, <a href="/search/cs?searchtype=author&query=Ikenberg%2C+B">Benno Ikenberg</a>, <a href="/search/cs?searchtype=author&query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&query=Kirschke%2C+J+S">Jan S. Kirschke</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.06694v1-abstract-short" style="display: inline;"> Magnetic resonance imaging (MRI) is a central modality for stroke imaging. It is used upon patient admission to make treatment decisions such as selecting patients for intravenous thrombolysis or endovascular therapy. MRI is later used in the duration of hospital stay to predict outcome by visualizing infarct core size and location. Furthermore, it may be used to characterize stroke etiology, e.g.… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.06694v1-abstract-full').style.display = 'inline'; document.getElementById('2206.06694v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.06694v1-abstract-full" style="display: none;"> Magnetic resonance imaging (MRI) is a central modality for stroke imaging. It is used upon patient admission to make treatment decisions such as selecting patients for intravenous thrombolysis or endovascular therapy. MRI is later used in the duration of hospital stay to predict outcome by visualizing infarct core size and location. Furthermore, it may be used to characterize stroke etiology, e.g. differentiation between (cardio)-embolic and non-embolic stroke. Computer based automated medical image processing is increasingly finding its way into clinical routine. Previous iterations of the Ischemic Stroke Lesion Segmentation (ISLES) challenge have aided in the generation of identifying benchmark methods for acute and sub-acute ischemic stroke lesion segmentation. Here we introduce an expert-annotated, multicenter MRI dataset for segmentation of acute to subacute stroke lesions. This dataset comprises 400 multi-vendor MRI cases with high variability in stroke lesion size, quantity and location. It is split into a training dataset of n=250 and a test dataset of n=150. All training data will be made publicly available. The test dataset will be used for model validation only and will not be released to the public. This dataset serves as the foundation of the ISLES 2022 challenge with the goal of finding algorithmic methods to enable the development and benchmarking of robust and accurate segmentation algorithms for ischemic stroke. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.06694v1-abstract-full').style.display = 'none'; document.getElementById('2206.06694v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 2 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Scientific data 9.1 (2022): 762 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.01653">arXiv:2206.01653</a> <span> [<a href="https://arxiv.org/pdf/2206.01653">pdf</a>, <a href="https://arxiv.org/format/2206.01653">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41592-023-02151-z">10.1038/s41592-023-02151-z <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Metrics reloaded: Recommendations for image analysis validation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Maier-Hein%2C+L">Lena Maier-Hein</a>, <a href="/search/cs?searchtype=author&query=Reinke%2C+A">Annika Reinke</a>, <a href="/search/cs?searchtype=author&query=Godau%2C+P">Patrick Godau</a>, <a href="/search/cs?searchtype=author&query=Tizabi%2C+M+D">Minu D. Tizabi</a>, <a href="/search/cs?searchtype=author&query=Buettner%2C+F">Florian Buettner</a>, <a href="/search/cs?searchtype=author&query=Christodoulou%2C+E">Evangelia Christodoulou</a>, <a href="/search/cs?searchtype=author&query=Glocker%2C+B">Ben Glocker</a>, <a href="/search/cs?searchtype=author&query=Isensee%2C+F">Fabian Isensee</a>, <a href="/search/cs?searchtype=author&query=Kleesiek%2C+J">Jens Kleesiek</a>, <a href="/search/cs?searchtype=author&query=Kozubek%2C+M">Michal Kozubek</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&query=Riegler%2C+M+A">Michael A. Riegler</a>, <a href="/search/cs?searchtype=author&query=Wiesenfarth%2C+M">Manuel Wiesenfarth</a>, <a href="/search/cs?searchtype=author&query=Kavur%2C+A+E">A. Emre Kavur</a>, <a href="/search/cs?searchtype=author&query=Sudre%2C+C+H">Carole H. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.01653">arXiv:2206.01653</a> <span> [<a href="https://arxiv.org/pdf/2206.01653">pdf</a>, <a href="https://arxiv.org/format/2206.01653">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41592-023-02151-z">10.1038/s41592-023-02151-z <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Metrics reloaded: Recommendations for image analysis validation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Maier-Hein%2C+L">Lena Maier-Hein</a>, <a href="/search/cs?searchtype=author&query=Reinke%2C+A">Annika Reinke</a>, <a href="/search/cs?searchtype=author&query=Godau%2C+P">Patrick Godau</a>, <a href="/search/cs?searchtype=author&query=Tizabi%2C+M+D">Minu D. Tizabi</a>, <a href="/search/cs?searchtype=author&query=Buettner%2C+F">Florian Buettner</a>, <a href="/search/cs?searchtype=author&query=Christodoulou%2C+E">Evangelia Christodoulou</a>, <a href="/search/cs?searchtype=author&query=Glocker%2C+B">Ben Glocker</a>, <a href="/search/cs?searchtype=author&query=Isensee%2C+F">Fabian Isensee</a>, <a href="/search/cs?searchtype=author&query=Kleesiek%2C+J">Jens Kleesiek</a>, <a href="/search/cs?searchtype=author&query=Kozubek%2C+M">Michal Kozubek</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&query=Riegler%2C+M+A">Michael A. Riegler</a>, <a href="/search/cs?searchtype=author&query=Wiesenfarth%2C+M">Manuel Wiesenfarth</a>, <a href="/search/cs?searchtype=author&query=Kavur%2C+A+E">A. Emre Kavur</a>, <a href="/search/cs?searchtype=author&query=Sudre%2C+C+H">Carole H. Sudre</a>, <a href="/search/cs?searchtype=author&query=Baumgartner%2C+M">Michael Baumgartner</a>, <a href="/search/cs?searchtype=author&query=Eisenmann%2C+M">Matthias Eisenmann</a>, <a href="/search/cs?searchtype=author&query=Heckmann-N%C3%B6tzel%2C+D">Doreen Heckmann-Nötzel</a>, <a href="/search/cs?searchtype=author&query=R%C3%A4dsch%2C+T">Tim Rädsch</a>, <a href="/search/cs?searchtype=author&query=Acion%2C+L">Laura Acion</a>, <a href="/search/cs?searchtype=author&query=Antonelli%2C+M">Michela Antonelli</a>, <a href="/search/cs?searchtype=author&query=Arbel%2C+T">Tal Arbel</a>, <a href="/search/cs?searchtype=author&query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/cs?searchtype=author&query=Benis%2C+A">Arriel Benis</a>, <a href="/search/cs?searchtype=author&query=Blaschko%2C+M">Matthew Blaschko</a> , et al. (49 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2206.01653v8-abstract-full"> Increasing evidence shows that flaws in machine learning (ML) algorithm validation are an underestimated global problem. Particularly in automatic biomedical image analysis, chosen performance metrics often do not reflect the domain interest, thus failing to adequately measure scientific progress and hindering translation of ML techniques into practice. To overcome this, our large international expert consortium created Metrics Reloaded, a comprehensive framework guiding researchers in the problem-aware selection of metrics. Following the convergence of ML methodology across application domains, Metrics Reloaded fosters the convergence of validation methodology. The framework was developed in a multi-stage Delphi process and is based on the novel concept of a problem fingerprint - a structured representation of the given problem that captures all aspects that are relevant for metric selection, from the domain interest to the properties of the target structure(s), data set and algorithm output. Based on the problem fingerprint, users are guided through the process of choosing and applying appropriate validation metrics while being made aware of potential pitfalls. Metrics Reloaded targets image analysis problems that can be interpreted as a classification task at image, object or pixel level, namely image-level classification, object detection, semantic segmentation, and instance segmentation tasks. To improve the user experience, we implemented the framework in the Metrics Reloaded online tool, which also provides a point of access to explore weaknesses, strengths and specific recommendations for the most common validation metrics. The broad applicability of our framework across domains is demonstrated by an instantiation for various biological and medical image analysis use cases. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Shared first authors: Lena Maier-Hein, Annika Reinke. arXiv admin note: substantial text overlap with arXiv:2104.05642. Published in Nature Methods</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nature Methods, 1-18 (2024) </p> </li>
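<p class="is-size-7">To make the "problem fingerprint" idea concrete, here is a deliberately toy mapping from a few fingerprint properties to candidate metrics. It illustrates the concept only; the real framework encodes far more properties and decision rules, and the Metrics Reloaded online tool should be consulted for actual recommendations.</p> <pre><code class="language-python">
def recommend_metrics(fingerprint):
    """Toy illustration of fingerprint-driven metric selection (not the
    framework's actual decision logic)."""
    task = fingerprint["task"]
    if task == "semantic_segmentation":
        # Boundary-sensitive metrics matter more for small structures.
        return ["Dice", "NSD"] if fingerprint.get("small_structures") else ["Dice"]
    if task == "image_classification":
        return ["Balanced Accuracy"] if fingerprint.get("class_imbalance") else ["Accuracy"]
    if task == "object_detection":
        return ["AP@IoU=0.5"]
    raise ValueError(f"unknown task: {task}")

print(recommend_metrics({"task": "semantic_segmentation", "small_structures": True}))
</code></pre>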
href="/search/cs?searchtype=author&query=Sahm%2C+F">Felix Sahm</a>, <a href="/search/cs?searchtype=author&query=Maier-Hein%2C+K">Klaus Maier-Hein</a>, <a href="/search/cs?searchtype=author&query=Zenk%2C+M">Maximilian Zenk</a>, <a href="/search/cs?searchtype=author&query=Bendszus%2C+M">Martin Bendszus</a>, <a href="/search/cs?searchtype=author&query=Wick%2C+W">Wolfgang Wick</a>, <a href="/search/cs?searchtype=author&query=Calabrese%2C+E">Evan Calabrese</a>, <a href="/search/cs?searchtype=author&query=Rudie%2C+J">Jeffrey Rudie</a>, <a href="/search/cs?searchtype=author&query=Villanueva-Meyer%2C+J">Javier Villanueva-Meyer</a> , et al. (254 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.10836v2-abstract-short" style="display: inline;"> Although machine learning (ML) has shown promise in numerous domains, there are concerns about generalizability to out-of-sample data. This is currently addressed by centrally sharing ample, and importantly diverse, data from multiple sites. However, such centralization is challenging to scale (or even not feasible) due to various limitations. Federated ML (FL) provides an alternative to train acc… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.10836v2-abstract-full').style.display = 'inline'; document.getElementById('2204.10836v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.10836v2-abstract-full" style="display: none;"> Although machine learning (ML) has shown promise in numerous domains, there are concerns about generalizability to out-of-sample data. This is currently addressed by centrally sharing ample, and importantly diverse, data from multiple sites. However, such centralization is challenging to scale (or even not feasible) due to various limitations. Federated ML (FL) provides an alternative to train accurate and generalizable ML models, by only sharing numerical model updates. Here we present findings from the largest FL study to-date, involving data from 71 healthcare institutions across 6 continents, to generate an automatic tumor boundary detector for the rare disease of glioblastoma, utilizing the largest dataset of such patients ever used in the literature (25,256 MRI scans from 6,314 patients). We demonstrate a 33% improvement over a publicly trained model to delineate the surgically targetable tumor, and 23% improvement over the tumor's entire extent. We anticipate our study to: 1) enable more studies in healthcare informed by large and diverse data, ensuring meaningful results for rare diseases and underrepresented populations, 2) facilitate further quantitative analyses for glioblastoma via performance optimization of our consensus model for eventual public release, and 3) demonstrate the effectiveness of FL at such scale and task complexity as a paradigm shift for multi-site collaborations, alleviating the need for data sharing. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.10836v2-abstract-full').style.display = 'none'; document.getElementById('2204.10836v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">federated learning, deep learning, convolutional neural network, segmentation, brain tumor, glioma, glioblastoma, FeTS, BraTS</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.01726">arXiv:2203.01726</a> <span> [<a href="https://arxiv.org/pdf/2203.01726">pdf</a>, <a href="https://arxiv.org/format/2203.01726">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41598-022-21910-0">10.1038/s41598-022-21910-0 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Ensembles of Vision Transformers as a New Paradigm for Automated Classification in Ecology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kyathanahally%2C+S">S. Kyathanahally</a>, <a href="/search/cs?searchtype=author&query=Hardeman%2C+T">T. Hardeman</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">M. Reyes</a>, <a href="/search/cs?searchtype=author&query=Merz%2C+E">E. Merz</a>, <a href="/search/cs?searchtype=author&query=Bulas%2C+T">T. Bulas</a>, <a href="/search/cs?searchtype=author&query=Brun%2C+P">P. Brun</a>, <a href="/search/cs?searchtype=author&query=Pomati%2C+F">F. Pomati</a>, <a href="/search/cs?searchtype=author&query=Baity-Jesi%2C+M">M. Baity-Jesi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.01726v3-abstract-short" style="display: inline;"> Monitoring biodiversity is paramount to manage and protect natural resources. Collecting images of organisms over large temporal or spatial scales is a promising practice to monitor the biodiversity of natural ecosystems, providing large amounts of data with minimal interference with the environment. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.01726">arXiv:2203.01726</a> <span> [<a href="https://arxiv.org/pdf/2203.01726">pdf</a>, <a href="https://arxiv.org/format/2203.01726">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41598-022-21910-0">10.1038/s41598-022-21910-0 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Ensembles of Vision Transformers as a New Paradigm for Automated Classification in Ecology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kyathanahally%2C+S">S. Kyathanahally</a>, <a href="/search/cs?searchtype=author&query=Hardeman%2C+T">T. Hardeman</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">M. Reyes</a>, <a href="/search/cs?searchtype=author&query=Merz%2C+E">E. Merz</a>, <a href="/search/cs?searchtype=author&query=Bulas%2C+T">T. Bulas</a>, <a href="/search/cs?searchtype=author&query=Brun%2C+P">P. Brun</a>, <a href="/search/cs?searchtype=author&query=Pomati%2C+F">F. Pomati</a>, <a href="/search/cs?searchtype=author&query=Baity-Jesi%2C+M">M. Baity-Jesi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2203.01726v3-abstract-full"> Monitoring biodiversity is paramount to manage and protect natural resources. Collecting images of organisms over large temporal or spatial scales is a promising practice to monitor the biodiversity of natural ecosystems, providing large amounts of data with minimal interference with the environment. Deep learning models are currently used to automate classification of organisms into taxonomic units. However, imprecision in these classifiers introduces a measurement noise that is difficult to control and can significantly hinder the analysis and interpretation of data. We overcome this limitation through ensembles of Data-efficient image Transformers (DeiTs), which not only are easy to train and implement, but also significantly outperform the previous state of the art (SOTA). We validate our results on ten ecological imaging datasets of diverse origin, ranging from plankton to birds. On all the datasets, we achieve a new SOTA, with a reduction of the error with respect to the previous SOTA ranging from 29.35% to 100.00%, and often achieving performances very close to perfect classification. Ensembles of DeiTs perform better not because of superior single-model performances but rather due to smaller overlaps in the predictions by independent models and lower top-1 probabilities. This increases the benefit of ensembling, especially when using geometric averages to combine individual learners. While we only test our approach on biodiversity image datasets, our approach is generic and can be applied to any kind of images. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in Scientific Reports</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Scientific Reports 12, 18590 (2022) </p> </li>
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.05258">arXiv:2108.05258</a> <span> [<a href="https://arxiv.org/pdf/2108.05258">pdf</a>, <a href="https://arxiv.org/format/2108.05258">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Populations and Evolution">q-bio.PE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.3389/fmicb.2021.746297">10.3389/fmicb.2021.746297 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Deep Learning Classification of Lake Zooplankton </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kyathanahally%2C+S+P">S. P. Kyathanahally</a>, <a href="/search/cs?searchtype=author&query=Hardeman%2C+T">T. Hardeman</a>, <a href="/search/cs?searchtype=author&query=Merz%2C+E">E. Merz</a>, <a href="/search/cs?searchtype=author&query=Kozakiewicz%2C+T">T. Kozakiewicz</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">M. Reyes</a>, <a href="/search/cs?searchtype=author&query=Isles%2C+P">P. Isles</a>, <a href="/search/cs?searchtype=author&query=Pomati%2C+F">F. Pomati</a>, <a href="/search/cs?searchtype=author&query=Baity-Jesi%2C+M">M. Baity-Jesi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2108.05258v1-abstract-full"> Plankton are effective indicators of environmental change and ecosystem health in freshwater habitats, but collection of plankton data using manual microscopic methods is extremely labor-intensive and expensive. Automated plankton imaging offers a promising way forward to monitor plankton communities with high frequency and accuracy in real-time. Yet, manual annotation of millions of images poses a serious challenge to taxonomists. Deep learning classifiers have been successfully applied in various fields and provided encouraging results when used to categorize marine plankton images. Here, we present a set of deep learning models developed for the identification of lake plankton, and study several strategies to obtain optimal performance, which lead to operational prescriptions for users. To this aim, we annotated over 17,900 images of zooplankton and large phytoplankton colonies, detected in Lake Greifensee (Switzerland) with the Dual Scripps Plankton Camera, into 35 classes. Our best models were based on transfer learning and ensembling, which classified plankton images with 98% accuracy and 93% F1 score. When tested on freely available plankton datasets produced by other automated imaging tools (ZooScan, FlowCytobot and ISIIS), our models performed better than previously used models. Our annotated data, code and classification models are freely available online. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Data and code links will be active/updated after publication</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Front. Microbiol. 12:746297 (2021) </p> </li>
De Souza</a>, <a href="/search/cs?searchtype=author&query=Oliveira-Santos%2C+T">Thiago Oliveira-Santos</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.03208v1-abstract-short" style="display: inline;"> The analysis of Magnetic Resonance Imaging (MRI) sequences enables clinical professionals to monitor the progression of a brain tumor. As the interest for automatizing brain volume MRI analysis increases, it becomes convenient to have each sequence well identified. However, the unstandardized naming of MRI sequences makes their identification difficult for automated systems, as well as makes it di… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.03208v1-abstract-full').style.display = 'inline'; document.getElementById('2106.03208v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.03208v1-abstract-full" style="display: none;"> The analysis of Magnetic Resonance Imaging (MRI) sequences enables clinical professionals to monitor the progression of a brain tumor. As the interest for automatizing brain volume MRI analysis increases, it becomes convenient to have each sequence well identified. However, the unstandardized naming of MRI sequences makes their identification difficult for automated systems, as well as makes it difficult for researches to generate or use datasets for machine learning research. In the face of that, we propose a system for identifying types of brain MRI sequences based on deep learning. By training a Convolutional Neural Network (CNN) based on 18-layer ResNet architecture, our system can classify a volumetric brain MRI as a FLAIR, T1, T1c or T2 sequence, or whether it does not belong to any of these classes. The network was evaluated on publicly available datasets comprising both, pre-processed (BraTS dataset) and non-pre-processed (TCGA-GBM dataset), image types with diverse acquisition protocols, requiring only a few slices of the volume for training. Our system can classify among sequence types with an accuracy of 96.81%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.03208v1-abstract-full').style.display = 'none'; document.getElementById('2106.03208v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> In 2020 25th International Conference on Pattern Recognition (ICPR) (pp. 1-8). 

arXiv:2105.10793 [pdf, other] cs.CV
GOO: A Dataset for Gaze Object Prediction in Retail Environments
Authors: Henri Tomas, Marcus Reyes, Raimarc Dionido, Mark Ty, Jonric Mirando, Joel Casimiro, Rowel Atienza, Richard Guinto
Abstract: One of the most fundamental and information-laden actions humans do is to look at objects. However, a survey of current works reveals that existing gaze-related datasets annotate only the pixel being looked at, and not the boundaries of a specific object of interest. This lack of object annotation presents an opportunity for further advancing gaze estimation research. To this end, we present a challenging new task called gaze object prediction, where the goal is to predict a bounding box for a person's gazed-at object. To train and evaluate gaze networks on this task, we present the Gaze On Objects (GOO) dataset. GOO is composed of a large set of synthetic images (GOO Synth) supplemented by a smaller subset of real images (GOO-Real) of people looking at objects in a retail environment. Our work establishes extensive baselines on GOO by re-implementing and evaluating selected state-of-the-art models on the task of gaze following and domain adaptation. Code is available on GitHub.
Submitted 21 June, 2021; v1 submitted 22 May, 2021; originally announced May 2021.
Comments: CVPR 2021 Workshop on Gaze Estimation and Prediction in the Wild (GAZE 2021)
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.10793v2-abstract-full').style.display = 'none'; document.getElementById('2105.10793v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CVPR 20201 Workshop on Gaze Estimation and Prediction in the Wild (GAZE 2021)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.05642">arXiv:2104.05642</a> <span> [<a href="https://arxiv.org/pdf/2104.05642">pdf</a>, <a href="https://arxiv.org/format/2104.05642">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Common Limitations of Image Processing Metrics: A Picture Story </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Reinke%2C+A">Annika Reinke</a>, <a href="/search/cs?searchtype=author&query=Tizabi%2C+M+D">Minu D. Tizabi</a>, <a href="/search/cs?searchtype=author&query=Sudre%2C+C+H">Carole H. Sudre</a>, <a href="/search/cs?searchtype=author&query=Eisenmann%2C+M">Matthias Eisenmann</a>, <a href="/search/cs?searchtype=author&query=R%C3%A4dsch%2C+T">Tim R盲dsch</a>, <a href="/search/cs?searchtype=author&query=Baumgartner%2C+M">Michael Baumgartner</a>, <a href="/search/cs?searchtype=author&query=Acion%2C+L">Laura Acion</a>, <a href="/search/cs?searchtype=author&query=Antonelli%2C+M">Michela Antonelli</a>, <a href="/search/cs?searchtype=author&query=Arbel%2C+T">Tal Arbel</a>, <a href="/search/cs?searchtype=author&query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/cs?searchtype=author&query=Bankhead%2C+P">Peter Bankhead</a>, <a href="/search/cs?searchtype=author&query=Benis%2C+A">Arriel Benis</a>, <a href="/search/cs?searchtype=author&query=Blaschko%2C+M">Matthew Blaschko</a>, <a href="/search/cs?searchtype=author&query=Buettner%2C+F">Florian Buettner</a>, <a href="/search/cs?searchtype=author&query=Cardoso%2C+M+J">M. Jorge Cardoso</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+J">Jianxu Chen</a>, <a href="/search/cs?searchtype=author&query=Cheplygina%2C+V">Veronika Cheplygina</a>, <a href="/search/cs?searchtype=author&query=Christodoulou%2C+E">Evangelia Christodoulou</a>, <a href="/search/cs?searchtype=author&query=Cimini%2C+B">Beth Cimini</a>, <a href="/search/cs?searchtype=author&query=Collins%2C+G+S">Gary S. Collins</a>, <a href="/search/cs?searchtype=author&query=Engelhardt%2C+S">Sandy Engelhardt</a>, <a href="/search/cs?searchtype=author&query=Farahani%2C+K">Keyvan Farahani</a>, <a href="/search/cs?searchtype=author&query=Ferrer%2C+L">Luciana Ferrer</a>, <a href="/search/cs?searchtype=author&query=Galdran%2C+A">Adrian Galdran</a>, <a href="/search/cs?searchtype=author&query=van+Ginneken%2C+B">Bram van Ginneken</a> , et al. 

arXiv:2101.00489 [pdf, other] eess.IV; cs.CV; cs.LG
doi: 10.1016/j.media.2020.101888
Combining unsupervised and supervised learning for predicting the final stroke lesion
Authors: Adriano Pinto, Sérgio Pereira, Raphael Meier, Roland Wiest, Victor Alves, Mauricio Reyes, Carlos A. Silva
Abstract: Predicting the final ischaemic stroke lesion provides crucial information regarding the volume of salvageable hypoperfused tissue, which helps physicians in the difficult decision-making process of treatment planning and intervention. Treatment selection is influenced by clinical diagnosis, which requires delineating the stroke lesion, as well as characterising cerebral blood flow dynamics using neuroimaging acquisitions. Nonetheless, predicting the final stroke lesion is an intricate task, due to the variability in lesion size, shape, location and the underlying cerebral haemodynamic processes that occur after the ischaemic stroke takes place. Moreover, since the elapsed time between stroke and treatment is related to the loss of brain tissue, assessing and predicting the final stroke lesion needs to be performed in a short period of time, which makes the task even more complex. Therefore, there is a need for automatic methods that predict the final stroke lesion and support physicians in the treatment decision process. We propose a fully automatic deep learning method based on unsupervised and supervised learning to predict the final stroke lesion after 90 days. Our aim is to predict the final stroke lesion location and extent, taking into account the underlying cerebral blood flow dynamics that can influence the prediction. To achieve this, we propose a two-branch Restricted Boltzmann Machine, which provides specialized data-driven features from different sets of standard parametric Magnetic Resonance Imaging maps. These data-driven feature maps are then combined with the parametric Magnetic Resonance Imaging maps and fed to a Convolutional and Recurrent Neural Network architecture. We evaluated our proposal on the publicly available ISLES 2017 testing dataset, reaching a Dice score of 0.38, a Hausdorff Distance of 29.21 mm, and an Average Symmetric Surface Distance of 5.52 mm.
Submitted 2 January, 2021; originally announced January 2021.
Comments: Accepted at Medical Image Analysis (MedIA)
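
Of the three reported metrics, the Dice score is the simplest to state; a minimal implementation for binary masks (the Hausdorff and surface distances additionally need the voxel spacing and are omitted here):

```python
# Dice score = 2|A ∩ B| / (|A| + |B|) between predicted and reference masks.
import numpy as np

def dice(pred, target, eps=1e-8):
    pred, target = pred.astype(bool), target.astype(bool)
    intersection = np.logical_and(pred, target).sum()
    return 2.0 * intersection / (pred.sum() + target.sum() + eps)
```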
style="display: inline;"> Background and Objective: Deep learning enables tremendous progress in medical image analysis. One driving force of this progress are open-source frameworks like TensorFlow and PyTorch. However, these frameworks rarely address issues specific to the domain of medical image analysis, such as 3-D data handling and distance metrics for evaluation. pymia, an open-source Python package, tries to addres… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.03639v2-abstract-full').style.display = 'inline'; document.getElementById('2010.03639v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.03639v2-abstract-full" style="display: none;"> Background and Objective: Deep learning enables tremendous progress in medical image analysis. One driving force of this progress are open-source frameworks like TensorFlow and PyTorch. However, these frameworks rarely address issues specific to the domain of medical image analysis, such as 3-D data handling and distance metrics for evaluation. pymia, an open-source Python package, tries to address these issues by providing flexible data handling and evaluation independent of the deep learning framework. Methods: The pymia package provides data handling and evaluation functionalities. The data handling allows flexible medical image handling in every commonly used format (e.g., 2-D, 2.5-D, and 3-D; full- or patch-wise). Even data beyond images like demographics or clinical reports can easily be integrated into deep learning pipelines. The evaluation allows stand-alone result calculation and reporting, as well as performance monitoring during training using a vast amount of domain-specific metrics for segmentation, reconstruction, and regression. Results: The pymia package is highly flexible, allows for fast prototyping, and reduces the burden of implementing data handling routines and evaluation methods. While data handling and evaluation are independent of the deep learning framework used, they can easily be integrated into TensorFlow and PyTorch pipelines. The developed package was successfully used in a variety of research projects for segmentation, reconstruction, and regression. Conclusions: The pymia package fills the gap of current deep learning frameworks regarding data handling and evaluation in medical image analysis. It is available at https://github.com/rundherum/pymia and can directly be installed from the Python Package Index using pip install pymia. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.03639v2-abstract-full').style.display = 'none'; document.getElementById('2010.03639v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">first and last author contributed equally</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Computer Methods and Programs in Biomedicine (2021), 198, 105796 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.04139">arXiv:2008.04139</a> <span> [<a href="https://arxiv.org/pdf/2008.04139">pdf</a>, <a href="https://arxiv.org/format/2008.04139">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-61598-7_6">10.1007/978-3-030-61598-7_6 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Learning Bloch Simulations for MR Fingerprinting by Invertible Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Balsiger%2C+F">Fabian Balsiger</a>, <a href="/search/cs?searchtype=author&query=Jungo%2C+A">Alain Jungo</a>, <a href="/search/cs?searchtype=author&query=Scheidegger%2C+O">Olivier Scheidegger</a>, <a href="/search/cs?searchtype=author&query=Marty%2C+B">Benjamin Marty</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.04139v2-abstract-short" style="display: inline;"> Magnetic resonance fingerprinting (MRF) enables fast and multiparametric MR imaging. Despite fast acquisition, the state-of-the-art reconstruction of MRF based on dictionary matching is slow and lacks scalability. To overcome these limitations, neural network (NN) approaches estimating MR parameters from fingerprints have been proposed recently. Here, we revisit NN-based MRF reconstruction to join… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.04139v2-abstract-full').style.display = 'inline'; document.getElementById('2008.04139v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.04139v2-abstract-full" style="display: none;"> Magnetic resonance fingerprinting (MRF) enables fast and multiparametric MR imaging. Despite fast acquisition, the state-of-the-art reconstruction of MRF based on dictionary matching is slow and lacks scalability. To overcome these limitations, neural network (NN) approaches estimating MR parameters from fingerprints have been proposed recently. Here, we revisit NN-based MRF reconstruction to jointly learn the forward process from MR parameters to fingerprints and the backward process from fingerprints to MR parameters by leveraging invertible neural networks (INNs). 
As a proof-of-concept, we perform various experiments showing the benefit of learning the forward process, i.e., the Bloch simulations, for improved MR parameter estimation. The benefit especially accentuates when MR parameter estimation is difficult due to MR physical restrictions. Therefore, INNs might be a feasible alternative to the current solely backward-based NNs for MRF reconstruction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.04139v2-abstract-full').style.display = 'none'; document.getElementById('2008.04139v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at MICCAI MLMIR 2020</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine Learning for Medical Image Reconstruction. MLMIR 2020. Lecture Notes in Computer Science, vol 12450. Springer, Cham </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.03778">arXiv:2005.03778</a> <span> [<a href="https://arxiv.org/pdf/2005.03778">pdf</a>, <a href="https://arxiv.org/format/2005.03778">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> LGSVL Simulator: A High Fidelity Simulator for Autonomous Driving </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Rong%2C+G">Guodong Rong</a>, <a href="/search/cs?searchtype=author&query=Shin%2C+B+H">Byung Hyun Shin</a>, <a href="/search/cs?searchtype=author&query=Tabatabaee%2C+H">Hadi Tabatabaee</a>, <a href="/search/cs?searchtype=author&query=Lu%2C+Q">Qiang Lu</a>, <a href="/search/cs?searchtype=author&query=Lemke%2C+S">Steve Lemke</a>, <a href="/search/cs?searchtype=author&query=Mo%C5%BEeiko%2C+M">M膩rti艈拧 Mo啪eiko</a>, <a href="/search/cs?searchtype=author&query=Boise%2C+E">Eric Boise</a>, <a href="/search/cs?searchtype=author&query=Uhm%2C+G">Geehoon Uhm</a>, <a href="/search/cs?searchtype=author&query=Gerow%2C+M">Mark Gerow</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+S">Shalin Mehta</a>, <a href="/search/cs?searchtype=author&query=Agafonov%2C+E">Eugene Agafonov</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+T+H">Tae Hyung Kim</a>, <a href="/search/cs?searchtype=author&query=Sterner%2C+E">Eric Sterner</a>, <a href="/search/cs?searchtype=author&query=Ushiroda%2C+K">Keunhae Ushiroda</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Michael Reyes</a>, <a href="/search/cs?searchtype=author&query=Zelenkovsky%2C+D">Dmitry Zelenkovsky</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+S">Seonman Kim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
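
The invertible building block that makes joint forward/backward learning possible can be illustrated with a generic additive coupling layer (NICE-style). This is not the paper's architecture; it only shows why the inverse is exact and comes for free:

```python
import torch
import torch.nn as nn

class AdditiveCoupling(nn.Module):
    """y1 = x1; y2 = x2 + f(x1). Inverting needs no matrix inversion."""
    def __init__(self, dim):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(dim // 2, 64), nn.ReLU(),
                                 nn.Linear(64, dim - dim // 2))

    def forward(self, x):  # e.g. MR parameters -> fingerprint space
        x1, x2 = x.chunk(2, dim=1)
        return torch.cat([x1, x2 + self.net(x1)], dim=1)

    def inverse(self, y):  # e.g. fingerprints -> MR parameters
        y1, y2 = y.chunk(2, dim=1)
        return torch.cat([y1, y2 - self.net(y1)], dim=1)

layer = AdditiveCoupling(dim=8)
x = torch.randn(4, 8)
assert torch.allclose(layer.inverse(layer(x)), x, atol=1e-5)
```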
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.03778v3-abstract-short" style="display: inline;"> Testing autonomous driving algorithms on real autonomous vehicles is extremely costly and many researchers and developers in the field cannot afford a real car and the corresponding sensors. Although several free and open-source autonomous driving stacks, such as Autoware and Apollo are available, choices of open-source simulators to use with them are limited. In this paper, we introduce the LGSVL… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.03778v3-abstract-full').style.display = 'inline'; document.getElementById('2005.03778v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.03778v3-abstract-full" style="display: none;"> Testing autonomous driving algorithms on real autonomous vehicles is extremely costly and many researchers and developers in the field cannot afford a real car and the corresponding sensors. Although several free and open-source autonomous driving stacks, such as Autoware and Apollo are available, choices of open-source simulators to use with them are limited. In this paper, we introduce the LGSVL Simulator which is a high fidelity simulator for autonomous driving. The simulator engine provides end-to-end, full-stack simulation which is ready to be hooked up to Autoware and Apollo. In addition, simulator tools are provided with the core simulation engine which allow users to easily customize sensors, create new types of controllable objects, replace some modules in the core simulator, and create digital twins of particular environments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.03778v3-abstract-full').style.display = 'none'; document.getElementById('2005.03778v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 7 figures, ITSC 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.11430">arXiv:2004.11430</a> <span> [<a href="https://arxiv.org/pdf/2004.11430">pdf</a>, <a href="https://arxiv.org/format/2004.11430">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Populations and Evolution">q-bio.PE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1001/jamanetworkopen.2020.20485">10.1001/jamanetworkopen.2020.20485 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Mobile phone location data reveal the effect and geographic variation of social distancing on the spread of the COVID-19 epidemic </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gao%2C+S">Song Gao</a>, <a href="/search/cs?searchtype=author&query=Rao%2C+J">Jinmeng Rao</a>, <a href="/search/cs?searchtype=author&query=Kang%2C+Y">Yuhao Kang</a>, <a href="/search/cs?searchtype=author&query=Liang%2C+Y">Yunlei Liang</a>, <a href="/search/cs?searchtype=author&query=Kruse%2C+J">Jake Kruse</a>, <a href="/search/cs?searchtype=author&query=Doepfer%2C+D">Doerte Doepfer</a>, <a href="/search/cs?searchtype=author&query=Sethi%2C+A+K">Ajay K. Sethi</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+J+F+M">Juan Francisco Mandujano Reyes</a>, <a href="/search/cs?searchtype=author&query=Patz%2C+J">Jonathan Patz</a>, <a href="/search/cs?searchtype=author&query=Yandell%2C+B+S">Brian S. Yandell</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.11430v1-abstract-short" style="display: inline;"> The emergence of SARS-CoV-2 and the coronavirus infectious disease (COVID-19) has become a pandemic. Social (physical) distancing is a key non-pharmacologic control measure to reduce the transmission rate of SARS-COV-2, but high-level adherence is needed. Using daily travel distance and stay-at-home time derived from large-scale anonymous mobile phone location data provided by Descartes Labs and S… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.11430v1-abstract-full').style.display = 'inline'; document.getElementById('2004.11430v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.11430v1-abstract-full" style="display: none;"> The emergence of SARS-CoV-2 and the coronavirus infectious disease (COVID-19) has become a pandemic. Social (physical) distancing is a key non-pharmacologic control measure to reduce the transmission rate of SARS-COV-2, but high-level adherence is needed. 
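
The simulator ships with a Python API whose quickstart follows the pattern below, as best recalled from the published documentation; scene and vehicle identifiers vary between releases, so treat the string literals as placeholders:

```python
import os
import lgsvl

# Connect to a running simulator instance (default API port 8181).
sim = lgsvl.Simulator(os.environ.get("SIMULATOR_HOST", "127.0.0.1"), 8181)
if sim.current_scene == "BorregasAve":  # scene name: placeholder
    sim.reset()
else:
    sim.load("BorregasAve")

# Spawn an ego vehicle at a predefined spawn point and run for 10 s.
state = lgsvl.AgentState()
state.transform = sim.get_spawn()[0]
ego = sim.add_agent("Lincoln2017MKZ (Apollo 5.0)", lgsvl.AgentType.EGO, state)
sim.run(10)
```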

arXiv:2004.11430 [pdf, other] cs.SI; physics.soc-ph; q-bio.PE
doi: 10.1001/jamanetworkopen.2020.20485
Mobile phone location data reveal the effect and geographic variation of social distancing on the spread of the COVID-19 epidemic
Authors: Song Gao, Jinmeng Rao, Yuhao Kang, Yunlei Liang, Jake Kruse, Doerte Doepfer, Ajay K. Sethi, Juan Francisco Mandujano Reyes, Jonathan Patz, Brian S. Yandell
Abstract: The emergence of SARS-CoV-2 and the coronavirus infectious disease (COVID-19) has become a pandemic. Social (physical) distancing is a key non-pharmacologic control measure to reduce the transmission rate of SARS-CoV-2, but high-level adherence is needed. Using daily travel distance and stay-at-home time derived from large-scale anonymous mobile phone location data provided by Descartes Labs and SafeGraph, we quantify the degree to which social distancing mandates have been followed in the U.S. and their effect on the growth of COVID-19 cases. The correlations between the COVID-19 growth rate and the travel distance decay rate and the dwell time at home change rate were -0.586 (95% CI: -0.742 to -0.370) and 0.526 (95% CI: 0.293 to 0.700), respectively. Increases in state-specific doubling time of total cases ranged from 1.04–6.86 days to 3.66–30.29 days after social distancing orders were put in place, consistent with mechanistic epidemic prediction models. Social distancing mandates reduce the spread of COVID-19 when they are followed.
Submitted 23 April, 2020; originally announced April 2020.
Comments: 17 pages, 4 figures, 1 table
MSC Class: 65D10; ACM Class: H.4; G.3; J.2
Journal ref: JAMA Network Open. 2020;3(9):e2020485
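
The reported correlations with 95% confidence intervals can be reproduced for any paired series via the Fisher z-transform; the data below are synthetic stand-ins, not the study's mobility data:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.normal(size=45)                        # e.g. travel-distance decay rate
y = -0.6 * x + rng.normal(scale=0.8, size=45)  # e.g. COVID-19 growth rate

r, _ = stats.pearsonr(x, y)
z, se = np.arctanh(r), 1 / np.sqrt(len(x) - 3)  # Fisher z and its std. error
lo, hi = np.tanh(z - 1.96 * se), np.tanh(z + 1.96 * se)
print(f"r = {r:.3f}, 95% CI ({lo:.3f}, {hi:.3f})")
```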
href="/search/cs?searchtype=author&query=Murphy%2C+S">Susan Murphy</a>, <a href="/search/cs?searchtype=author&query=Klasnja%2C+P">Predrag Klasnja</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.13545v1-abstract-short" style="display: inline;"> Mobile health (mHealth) applications are a powerful medium for providing behavioral interventions, and systematic reviews suggest that theory-based interventions are more effective. However, how exactly theoretical concepts should be translated into features of technological interventions is often not clear. There is a gulf between the abstract nature of psychological theory and the concreteness o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.13545v1-abstract-full').style.display = 'inline'; document.getElementById('2003.13545v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.13545v1-abstract-full" style="display: none;"> Mobile health (mHealth) applications are a powerful medium for providing behavioral interventions, and systematic reviews suggest that theory-based interventions are more effective. However, how exactly theoretical concepts should be translated into features of technological interventions is often not clear. There is a gulf between the abstract nature of psychological theory and the concreteness of the designs needed to build health technologies. In this paper, we use SARA, a mobile app we developed to support substance-use research among adolescents and young adults, as a case study of a process of translating behavioral theory into mHealth intervention design. SARA was designed to increase adherence to daily self-report in longitudinal epidemiological studies. To achieve this goal, we implemented a number of constructs from the operant conditioning theory. We describe our design process and discuss how we operationalized theoretical constructs in the light of design constraints, user feedback, and empirical data from four formative studies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.13545v1-abstract-full').style.display = 'none'; document.getElementById('2003.13545v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. 

arXiv:1911.05391 [pdf] cs.CR; cs.CY
doi: 10.30534/ijatcse/2019/115852019
Development of a Secure and Private Electronic Procurement System based on Blockchain Implementation
Authors: August Thio-ac, Erwin John Domingo, Ricca May Reyes, Nilo Arago, Romeo Jr. Jorda, Jessica Velasco
Abstract: This paper presents the development of an online procurement system and the integration of blockchain technology. Various tools such as PHP, JavaScript, HTML, CSS, and jQuery were used in designing the graphical, programming logic, and blockchain aspects of the system. Every page and function has its respective construction and result. In addition, the proposed system's flow of process and the methods for testing and hosting the site, as well as the different web development languages used in every part of the development and design process, are presented. The proposed system was successfully and functionally developed, covering the execution of procurement proper, the placement of procured items or goods, and the signing of contracts by the winner and the procurer. Lastly, features such as user profiles for the bidder and procurer were added.
Submitted 13 November, 2019; originally announced November 2019.
Journal ref: International Journal of Advanced Trends in Computer Science and Engineering (2019) 2626-2631
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.05391v1-abstract-full').style.display = 'none'; document.getElementById('1911.05391v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> International Journal of Advanced Trends in Computer Science and Engineering (2019) 2626-2631 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.03786">arXiv:1911.03786</a> <span> [<a href="https://arxiv.org/pdf/1911.03786">pdf</a>, <a href="https://arxiv.org/format/1911.03786">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.media.2020.101741">10.1016/j.media.2020.101741 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Spatially Regularized Parametric Map Reconstruction for Fast Magnetic Resonance Fingerprinting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Balsiger%2C+F">Fabian Balsiger</a>, <a href="/search/cs?searchtype=author&query=Jungo%2C+A">Alain Jungo</a>, <a href="/search/cs?searchtype=author&query=Scheidegger%2C+O">Olivier Scheidegger</a>, <a href="/search/cs?searchtype=author&query=Carlier%2C+P+G">Pierre G. Carlier</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&query=Marty%2C+B">Benjamin Marty</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.03786v2-abstract-short" style="display: inline;"> Magnetic resonance fingerprinting (MRF) provides a unique concept for simultaneous and fast acquisition of multiple quantitative MR parameters. Despite acquisition efficiency, adoption of MRF into the clinics is hindered by its dictionary matching-based reconstruction, which is computationally demanding and lacks scalability. Here, we propose a convolutional neural network-based reconstruction, wh… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.03786v2-abstract-full').style.display = 'inline'; document.getElementById('1911.03786v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.03786v2-abstract-full" style="display: none;"> Magnetic resonance fingerprinting (MRF) provides a unique concept for simultaneous and fast acquisition of multiple quantitative MR parameters. 
Despite acquisition efficiency, adoption of MRF into the clinics is hindered by its dictionary matching-based reconstruction, which is computationally demanding and lacks scalability. Here, we propose a convolutional neural network-based reconstruction, which enables both accurate and fast reconstruction of parametric maps, and is adaptable based on the needs of spatial regularization and the capacity for the reconstruction. We evaluated the method using MRF T1-FF, an MRF sequence for T1 relaxation time of water (T1H2O) and fat fraction (FF) mapping. We demonstrate the method's performance on a highly heterogeneous dataset consisting of 164 patients with various neuromuscular diseases imaged at thighs and legs. We empirically show the benefit of incorporating spatial regularization during the reconstruction and demonstrate that the method learns meaningful features from MR physics perspective. Further, we investigate the ability of the method to handle highly heterogeneous morphometric variations and its generalization to anatomical regions unseen during training. The obtained results outperform the state-of-the-art in deep learning-based MRF reconstruction. The method achieved normalized root mean squared errors of 0.048 $\pm$ 0.011 for T1H2O maps and 0.027 $\pm$ 0.004 for FF maps when compared to the dictionary matching in a test set of 50 patients. Coupled with fast MRF sequences, the proposed method has the potential of enabling multiparametric MR imaging in clinically feasible time. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.03786v2-abstract-full').style.display = 'none'; document.getElementById('1911.03786v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to Medical Image Analysis</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Medical Image Analysis (2020), 64, 101741 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.11966">arXiv:1909.11966</a> <span> [<a href="https://arxiv.org/pdf/1909.11966">pdf</a>, <a href="https://arxiv.org/format/1909.11966">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Dual-Stream Pyramid Registration Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kang%2C+M">Miao Kang</a>, <a href="/search/cs?searchtype=author&query=Hu%2C+X">Xiaojun Hu</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+W">Weilin Huang</a>, <a href="/search/cs?searchtype=author&query=Scott%2C+M+R">Matthew R. 
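
The quoted errors are normalized root mean squared errors between the network's parameter maps and the dictionary-matching reference. One common normalization convention (by the reference's value range) is assumed below:

```python
import numpy as np

def nrmse(pred, ref):
    """RMSE normalized by the dynamic range of the reference map."""
    rmse = np.sqrt(np.mean((pred - ref) ** 2))
    return rmse / (ref.max() - ref.min())
```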

arXiv:1909.11966 [pdf, other] cs.CV
Dual-Stream Pyramid Registration Network
Authors: Miao Kang, Xiaojun Hu, Weilin Huang, Matthew R. Scott, Mauricio Reyes
Abstract: We propose a Dual-Stream Pyramid Registration Network (referred to as Dual-PRNet) for unsupervised 3D medical image registration. Unlike recent CNN-based registration approaches, such as VoxelMorph, which explore a single-stream encoder-decoder network to compute a registration field from a pair of 3D volumes, we design a two-stream architecture able to compute multi-scale registration fields from convolutional feature pyramids. Our contributions are two-fold: (i) we design a two-stream 3D encoder-decoder network which computes two convolutional feature pyramids separately for a pair of input volumes, resulting in strong deep representations that are meaningful for deformation estimation; (ii) we propose a pyramid registration module able to predict multi-scale registration fields directly from the decoding feature pyramids. This allows the registration fields to be refined gradually in a coarse-to-fine manner via sequential warping, and gives the model the capability to handle significant deformations between two volumes, such as large displacements in the spatial domain or slice space. The proposed Dual-PRNet is evaluated on two standard benchmarks for brain MRI registration, where it outperforms the state-of-the-art approaches by a large margin, e.g., improving over the recent VoxelMorph [2] from 0.683 to 0.778 on LPBA40 and from 0.511 to 0.631 on Mindboggle101, in terms of average Dice score. Code is available at: https://github.com/kangmiao15/Dual-Stream-PRNet-Plus.
Submitted 1 April, 2023; v1 submitted 26 September, 2019; originally announced September 2019.
Comments: Published in Medical Image Analysis, 2022
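
The "sequential warping" step amounts to resampling the moving image with each predicted field in turn. A 2-D sketch using torch.nn.functional.grid_sample; the paper works on 3-D volumes, and the zero-flow fields here are stand-ins for the network's multi-scale predictions:

```python
import torch
import torch.nn.functional as F

def warp(img, flow):
    """img: (N, C, H, W); flow: (N, 2, H, W) displacements in pixels."""
    n, _, h, w = img.shape
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    base = torch.stack((xs, ys), dim=0).float().unsqueeze(0)  # identity grid
    coords = base + flow
    coords[:, 0] = 2 * coords[:, 0] / (w - 1) - 1  # normalize x to [-1, 1]
    coords[:, 1] = 2 * coords[:, 1] / (h - 1) - 1  # normalize y to [-1, 1]
    return F.grid_sample(img, coords.permute(0, 2, 3, 1), align_corners=True)

moving = torch.rand(1, 1, 64, 64)
for flow in [torch.zeros(1, 2, 64, 64)] * 3:  # coarse-to-fine fields go here
    moving = warp(moving, flow)               # refine by sequential warping
```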

arXiv:1907.03338 [pdf, ps, other] eess.IV; cs.CV
Assessing Reliability and Challenges of Uncertainty Estimations for Medical Image Segmentation
Authors: Alain Jungo, Mauricio Reyes
Abstract: Despite the recent improvements in overall accuracy, deep learning systems still exhibit low levels of robustness. Detecting possible failures is critical for a successful clinical integration of these systems, where each data point corresponds to an individual patient. Uncertainty measures are a promising direction to improve failure detection, since they provide a measure of a system's confidence. Although many uncertainty estimation methods have been proposed for deep learning, little is known about their benefits and current challenges for medical image segmentation. Therefore, we report results of evaluating common voxel-wise uncertainty measures with respect to their reliability and limitations on two medical image segmentation datasets. Results show that current uncertainty methods perform similarly and, although they are well-calibrated at the dataset level, they tend to be miscalibrated at the subject level. The reliability of uncertainty estimates is therefore compromised, highlighting the importance of developing subject-wise uncertainty estimations. Additionally, among the benchmarked methods, we found auxiliary networks to be a valid alternative to common uncertainty methods, since they can be applied to any previously trained segmentation model.
Submitted 11 October, 2019; v1 submitted 7 July, 2019; originally announced July 2019.
Comments: Appears in Medical Image Computing and Computer Assisted Interventions (MICCAI), 2019
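
A typical voxel-wise uncertainty measure benchmarked in this line of work is the entropy of the per-voxel softmax output; a minimal numpy version:

```python
import numpy as np

def voxelwise_entropy(probs, eps=1e-12):
    """probs: (n_classes, D, H, W) softmax maps -> (D, H, W) uncertainty."""
    return -np.sum(probs * np.log(probs + eps), axis=0)
```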
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.03338v2-abstract-full').style.display = 'none'; document.getElementById('1907.03338v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Appears in Medical Image Computing and Computer Assisted Interventions (MICCAI), 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1906.02281">arXiv:1906.02281</a> <span> [<a href="https://arxiv.org/pdf/1906.02281">pdf</a>, <a href="https://arxiv.org/format/1906.02281">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-32245-8_31">10.1007/978-3-030-32245-8_31 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Learning Shape Representation on Sparse Point Clouds for Volumetric Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Balsiger%2C+F">Fabian Balsiger</a>, <a href="/search/cs?searchtype=author&query=Soom%2C+Y">Yannick Soom</a>, <a href="/search/cs?searchtype=author&query=Scheidegger%2C+O">Olivier Scheidegger</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1906.02281v1-abstract-short" style="display: inline;"> Volumetric image segmentation with convolutional neural networks (CNNs) encounters several challenges, which are specific to medical images. Among these challenges are large volumes of interest, high class imbalances, and difficulties in learning shape representations. To tackle these challenges, we propose to improve over traditional CNN-based volumetric image segmentation through point-wise clas… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1906.02281v1-abstract-full').style.display = 'inline'; document.getElementById('1906.02281v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1906.02281v1-abstract-full" style="display: none;"> Volumetric image segmentation with convolutional neural networks (CNNs) encounters several challenges, which are specific to medical images. Among these challenges are large volumes of interest, high class imbalances, and difficulties in learning shape representations. 
arXiv:1906.02281 [cs.CV, eess.IV] https://arxiv.org/abs/1906.02281
DOI: 10.1007/978-3-030-32245-8_31
Title: Learning Shape Representation on Sparse Point Clouds for Volumetric Image Segmentation
Authors: Fabian Balsiger, Yannick Soom, Olivier Scheidegger, Mauricio Reyes
Abstract: Volumetric image segmentation with convolutional neural networks (CNNs) encounters several challenges, which are specific to medical images. Among these challenges are large volumes of interest, high class imbalances, and difficulties in learning shape representations. To tackle these challenges, we propose to improve over traditional CNN-based volumetric image segmentation through point-wise classification of point clouds. The sparsity of point clouds allows processing of entire image volumes, balancing highly imbalanced segmentation problems, and explicitly learning an anatomical shape. We build upon PointCNN, a neural network proposed to process point clouds, and propose here to jointly encode shape and volumetric information within the point cloud in a compact and computationally effective manner. We demonstrate how this approach can then be used to refine CNN-based segmentation, which yields significantly improved results in our experiments on the difficult task of peripheral nerve segmentation from magnetic resonance neurography images. By synthetic experiments, we further show the capability of our approach in learning an explicit anatomical shape representation.
Submitted 5 June, 2019; originally announced June 2019.
Comments: Accepted at MICCAI 2019
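The joint encoding of shape and volumetric information can be sketched as follows: uncertain voxels of a CNN prediction become a sparse point cloud whose per-point features combine normalized coordinates with local image evidence, the kind of input a PointCNN-style classifier consumes. Thresholds and feature choices below are assumptions for illustration:

```python
import numpy as np

def segmentation_to_point_cloud(prob_map, image, low=0.2, high=0.8):
    """Turn uncertain voxels of a CNN probability map into a sparse point cloud.

    Each point carries its normalized coordinates (shape information) plus
    local image and probability values (volumetric information)."""
    # Keep only voxels the CNN is unsure about -- these form the sparse cloud.
    idx = np.argwhere((prob_map > low) & (prob_map < high))
    coords = idx / np.array(prob_map.shape)          # normalized xyz in [0, 1]
    feats = np.stack([image[tuple(idx.T)],           # raw intensity
                      prob_map[tuple(idx.T)]], 1)    # CNN confidence
    return np.hstack([coords, feats])                # (n_points, 5)

cloud = segmentation_to_point_cloud(np.random.rand(64, 64, 64),
                                    np.random.rand(64, 64, 64))
print(cloud.shape)  # (n_points, 5): x, y, z, intensity, probability
```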
arXiv:1904.10781 [cs.CV] https://arxiv.org/abs/1904.10781
Title: Informative sample generation using class aware generative adversarial networks for classification of chest Xrays
Authors: Behzad Bozorgtabar, Dwarikanath Mahapatra, Hendrik von Teng, Alexander Pollinger, Lukas Ebner, Jean-Philippe Thiran, Mauricio Reyes
Abstract: Training robust deep learning (DL) systems for disease detection from medical images is challenging due to limited images covering different disease types and severity. The problem is especially acute where there is a severe class imbalance. We propose an active learning (AL) framework to select the most informative samples for training our model using a Bayesian neural network. Informative samples are then used within a novel class aware generative adversarial network (CAGAN) to generate realistic chest X-ray images for data augmentation by transferring characteristics from one class label to another. Experiments show our proposed AL framework is able to achieve state-of-the-art performance by using about 35% of the full dataset, thus saving significant time and effort over conventional methods.
Submitted 30 April, 2019; v1 submitted 24 April, 2019; originally announced April 2019.
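The Bayesian informativeness ranking can be illustrated with Monte Carlo dropout, a common stand-in for a Bayesian neural network; the BALD score below is one standard choice and not necessarily the paper's exact criterion:

```python
import numpy as np

def bald_scores(mc_probs):
    """Mutual information I[y; w | x] from T stochastic forward passes.

    mc_probs: (T, N, C) softmax outputs of a dropout-enabled network run
    T times on N unlabeled samples.
    """
    eps = 1e-12
    mean = mc_probs.mean(axis=0)                               # (N, C)
    pred_entropy = -np.sum(mean * np.log(mean + eps), axis=1)  # H[E[p]]
    exp_entropy = -np.mean(
        np.sum(mc_probs * np.log(mc_probs + eps), axis=2), axis=0)  # E[H[p]]
    return pred_entropy - exp_entropy

mc = np.random.dirichlet([1, 1], size=(20, 100))  # T=20 passes, N=100 samples
query_idx = np.argsort(bald_scores(mc))[-10:]     # 10 most informative samples
print(query_idx)
```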
href="/search/cs?searchtype=author&query=Wagner%2C+F">Franca Wagner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.03041v1-abstract-short" style="display: inline;"> The detection of new or enlarged white-matter lesions in multiple sclerosis is a vital task in the monitoring of patients undergoing disease-modifying treatment for multiple sclerosis. However, the definition of 'new or enlarged' is not fixed, and it is known that lesion-counting is highly subjective, with high degree of inter- and intra-rater variability. Automated methods for lesion quantificati… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.03041v1-abstract-full').style.display = 'inline'; document.getElementById('1904.03041v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.03041v1-abstract-full" style="display: none;"> The detection of new or enlarged white-matter lesions in multiple sclerosis is a vital task in the monitoring of patients undergoing disease-modifying treatment for multiple sclerosis. However, the definition of 'new or enlarged' is not fixed, and it is known that lesion-counting is highly subjective, with high degree of inter- and intra-rater variability. Automated methods for lesion quantification hold the potential to make the detection of new and enlarged lesions consistent and repeatable. However, the majority of lesion segmentation algorithms are not evaluated for their ability to separate progressive from stable patients, despite this being a pressing clinical use-case. In this paper we show that change in volumetric measurements of lesion load alone is not a good method for performing this separation, even for highly performing segmentation methods. Instead, we propose a method for identifying lesion changes of high certainty, and establish on a dataset of longitudinal multiple sclerosis cases that this method is able to separate progressive from stable timepoints with a very high level of discrimination (AUC = 0.99), while changes in lesion volume are much less able to perform this separation (AUC = 0.71). Validation of the method on a second external dataset confirms that the method is able to generalize beyond the setting in which it was trained, achieving an accuracy of 83% in separating stable and progressive timepoints. Both lesion volume and count have previously been shown to be strong predictors of disease course across a population. However, we demonstrate that for individual patients, changes in these measures are not an adequate means of establishing no evidence of disease activity. Meanwhile, directly detecting tissue which changes, with high confidence, from non-lesion to lesion is a feasible methodology for identifying radiologically active patients. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.03041v1-abstract-full').style.display = 'none'; document.getElementById('1904.03041v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.02436">arXiv:1904.02436</a> <span> [<a href="https://arxiv.org/pdf/1904.02436">pdf</a>, <a href="https://arxiv.org/format/1904.02436">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Few-shot brain segmentation from weakly labeled data with deep heteroscedastic multi-task networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&query=Rebsamen%2C+M">Michael Rebsamen</a>, <a href="/search/cs?searchtype=author&query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&query=Rummel%2C+C">Christian Rummel</a>, <a href="/search/cs?searchtype=author&query=Wiest%2C+R">Roland Wiest</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.02436v1-abstract-short" style="display: inline;"> In applications of supervised learning applied to medical image segmentation, the need for large amounts of labeled data typically goes unquestioned. In particular, in the case of brain anatomy segmentation, hundreds or thousands of weakly-labeled volumes are often used as training data. In this paper, we first observe that for many brain structures, a small number of training examples, (n=9), wea… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.02436v1-abstract-full').style.display = 'inline'; document.getElementById('1904.02436v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.02436v1-abstract-full" style="display: none;"> In applications of supervised learning applied to medical image segmentation, the need for large amounts of labeled data typically goes unquestioned. In particular, in the case of brain anatomy segmentation, hundreds or thousands of weakly-labeled volumes are often used as training data. In this paper, we first observe that for many brain structures, a small number of training examples, (n=9), weakly labeled using Freesurfer 6.0, plus simple data augmentation, suffice as training data to achieve high performance, achieving an overall mean Dice coefficient of $0.84 \pm 0.12$ compared to Freesurfer over 28 brain structures in T1-weighted images of $\approx 4000$ 9-10 year-olds from the Adolescent Brain Cognitive Development study. We then examine two varieties of heteroscedastic network as a method for improving classification results. An existing proposal by Kendall and Gal, which uses Monte-Carlo inference to learn to predict the variance of each prediction, yields an overall mean Dice of $0.85 \pm 0.14$ and showed statistically significant improvements over 25 brain structures. 
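The core idea, detecting voxels that flip from non-lesion to lesion with high confidence, fits in a few lines. A numpy sketch with illustrative thresholds (the paper's tuned values are not given here):

```python
import numpy as np

def confident_new_lesion_voxels(p_base, p_follow, lo=0.1, hi=0.9):
    """Voxels that move with high confidence from non-lesion (baseline)
    to lesion (follow-up). lo/hi are illustrative thresholds."""
    return (p_base < lo) & (p_follow > hi)

def is_progressive(p_base, p_follow, min_voxels=50):
    """Call a timepoint radiologically active if enough voxels change with
    high certainty; the abstract reports this separates progressive from
    stable timepoints (AUC = 0.99) far better than lesion-volume change."""
    return confident_new_lesion_voxels(p_base, p_follow).sum() >= min_voxels

p0, p1 = np.random.rand(128, 128, 64), np.random.rand(128, 128, 64)
print(is_progressive(p0, p1))
```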
arXiv:1904.02436 [cs.LG, eess.IV, stat.ML] https://arxiv.org/abs/1904.02436
Title: Few-shot brain segmentation from weakly labeled data with deep heteroscedastic multi-task networks
Authors: Richard McKinley, Michael Rebsamen, Raphael Meier, Mauricio Reyes, Christian Rummel, Roland Wiest
Abstract: In applications of supervised learning to medical image segmentation, the need for large amounts of labeled data typically goes unquestioned. In particular, in the case of brain anatomy segmentation, hundreds or thousands of weakly-labeled volumes are often used as training data. In this paper, we first observe that for many brain structures, a small number of training examples (n=9), weakly labeled using Freesurfer 6.0, plus simple data augmentation, suffice to achieve high performance: an overall mean Dice coefficient of 0.84 ± 0.12 compared to Freesurfer over 28 brain structures in T1-weighted images of ≈4000 9-10 year-olds from the Adolescent Brain Cognitive Development study. We then examine two varieties of heteroscedastic network as a method for improving classification results. An existing proposal by Kendall and Gal, which uses Monte Carlo inference to learn to predict the variance of each prediction, yields an overall mean Dice of 0.85 ± 0.14 and showed statistically significant improvements over 25 brain structures. Meanwhile, a novel heteroscedastic network which directly learns the probability that an example has been mislabeled yielded an overall mean Dice of 0.87 ± 0.11 and showed statistically significant improvements over all but one of the brain structures considered. The loss function associated with this network can be interpreted as performing a form of learned label smoothing, where labels are only smoothed where they are judged to be uncertain.
Submitted 4 April, 2019; originally announced April 2019.
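The "learned label smoothing" interpretation can be sketched as a loss in which the network predicts, per voxel, a mislabel probability that controls how far the target is smoothed toward 0.5. This is a hedged reading of the abstract with an illustrative regularizer weight; the paper's exact formulation may differ:

```python
import torch
import torch.nn.functional as F

def mislabel_aware_loss(seg_logits, labels, mislabel_logits, reg=0.1):
    """Binary-segmentation loss where q = P(label is wrong) is predicted per
    voxel and the target is smoothed toward 0.5 exactly where q is high.
    The reg term is an assumption that stops the network from declaring
    every label uncertain."""
    q = torch.sigmoid(mislabel_logits)
    target = labels.float() * (1 - q) + 0.5 * q   # smooth only uncertain labels
    return F.binary_cross_entropy_with_logits(seg_logits, target) + reg * q.mean()

logits = torch.randn(2, 1, 32, 32, requires_grad=True)
mis = torch.randn(2, 1, 32, 32, requires_grad=True)
labels = torch.randint(0, 2, (2, 1, 32, 32))
mislabel_aware_loss(logits, labels, mis).backward()
```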
arXiv:1903.01322 [eess.IV, cs.CV] https://arxiv.org/abs/1903.01322
Title: Automatic Handgun Detection in X-ray Images using Bag of Words Model with Selective Search
Authors: David Castro Piñol, Enrique Juan Marañón Reyes
Abstract: Baggage inspection systems using X-ray screening are crucial for security. Only 90% of threat objects are recognized in X-ray systems based on human inspection. Manual detection requires high concentration due to the complexity of the images and the challenging viewpoints of the objects. In this paper, an algorithm based on Bag of Visual Words (BoVW) with Selective Search is proposed for handgun detection in single-energy X-ray images from the public GDXray database. This approach is an adaptation of BoVW to the context of X-ray baggage images. To evaluate the proposed method, its recognition effectiveness was tested on all bounding boxes returned by the Selective Search algorithm in 200 images. The most relevant result is the precision and true positive rate (PPV = 80%, TPR = 92%). This approach achieves good performance for handgun recognition. In addition, it is the first time the Selective Search localization algorithm has been tested on baggage X-ray images, and it shows promise in combination with Bag of Visual Words.
Submitted 4 March, 2019; originally announced March 2019.
Comments: in Spanish
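The BoVW stage of such a pipeline is compact to sketch: SIFT descriptors are quantized against a learned vocabulary and each region proposal becomes a word histogram. The vocabulary size and training flow below are illustrative assumptions, not the authors' configuration:

```python
import cv2
import numpy as np
from sklearn.cluster import KMeans

sift = cv2.SIFT_create()

def descriptors(gray):
    """SIFT descriptors for one grayscale crop (may be empty)."""
    _, desc = sift.detectAndCompute(gray, None)
    return desc if desc is not None else np.zeros((0, 128), np.float32)

def bovw_histogram(gray, vocab):
    """Normalized visual-word histogram for one region proposal."""
    desc = descriptors(gray)
    hist = np.zeros(vocab.n_clusters)
    if len(desc):
        words, counts = np.unique(vocab.predict(desc), return_counts=True)
        hist[words] = counts
    return hist / max(hist.sum(), 1)

# Training flow (crops/labels are placeholders): pool descriptors from
# labeled handgun and background crops, cluster them into a vocabulary,
# and fit a classifier (e.g. sklearn.svm.LinearSVC) on the histograms:
#   vocab = KMeans(n_clusters=200).fit(np.vstack([descriptors(c) for c in crops]))
#   clf = LinearSVC().fit([bovw_histogram(c, vocab) for c in crops], labels)
# At test time every Selective Search bounding box is cropped, converted to
# a histogram, and scored, yielding the PPV/TPR figures quoted above.
```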
arXiv:1901.07419 [cs.CV] https://arxiv.org/abs/1901.07419
Title: Simultaneous lesion and neuroanatomy segmentation in Multiple Sclerosis using deep neural networks
Authors: Richard McKinley, Rik Wepfer, Fabian Aschwanden, Lorenz Grunder, Raphaela Muri, Christian Rummel, Rajeev Verma, Christian Weisstanner, Mauricio Reyes, Anke Salmen, Andrew Chan, Franca Wagner, Roland Wiest
Abstract: Segmentation of white matter lesions and deep grey matter structures is an important task in the quantification of magnetic resonance imaging in multiple sclerosis. In this paper we explore segmentation solutions based on convolutional neural networks (CNNs) for providing fast, reliable segmentations of lesions and grey-matter structures in multi-modal MR imaging, and the performance of these methods when applied to out-of-centre data. We trained two state-of-the-art fully convolutional CNN architectures on the 2016 MSSEG training dataset, which was annotated by seven independent human raters: a reference implementation of a 3D Unet, and a more recently proposed 3D-to-2D architecture (DeepSCAN). We then retrained those methods on a larger dataset from a single centre, with and without labels for other brain structures. We quantified changes in performance owing to dataset shift, and changes in performance by adding the additional brain-structure labels. We also compared performance with freely available reference methods. Both fully-convolutional CNN methods substantially outperform other approaches in the literature when trained and evaluated in cross-validation on the MSSEG dataset, showing agreement with human raters in the range of human inter-rater variability. Both architectures showed drops in performance when trained on single-centre data and tested on the MSSEG dataset. When trained with the addition of weak anatomical labels derived from Freesurfer, the performance of the 3D Unet degraded, while the performance of the DeepSCAN net improved. Overall, the DeepSCAN network predicting both lesion and anatomical labels was the best-performing network examined.
Submitted 11 November, 2020; v1 submitted 22 January, 2019; originally announced January 2019.
Comments: Substantially revised version after comments from reviewers, including comparison to 3D Unet
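The claim of agreement "in the range of human inter-rater variability" suggests a simple check: compare method-to-rater Dice against rater-to-rater Dice. A sketch assuming binary lesion masks from the seven MSSEG raters (function names are illustrative):

```python
import numpy as np

def dice(a, b):
    """Dice overlap between two binary masks."""
    inter = np.logical_and(a, b).sum()
    return 2.0 * inter / max(a.sum() + b.sum(), 1)

def within_inter_rater_range(pred, rater_masks):
    """Mean Dice of the method against each rater, next to the mean Dice of
    all rater pairs; a method in the inter-rater range scores comparably."""
    method_vs_raters = np.mean([dice(pred, r) for r in rater_masks])
    rater_vs_rater = np.mean([dice(r1, r2)
                              for i, r1 in enumerate(rater_masks)
                              for r2 in rater_masks[i + 1:]])
    return method_vs_raters, rater_vs_rater

raters = [np.random.rand(64, 64, 32) > 0.7 for _ in range(7)]
pred = np.random.rand(64, 64, 32) > 0.7
print(within_inter_rater_range(pred, raters))
```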
arXiv:1812.02641 [cs.DC, cs.IT] https://arxiv.org/abs/1812.02641
Title: Local Conditioning: Exact Message Passing for Cyclic Undirected Distributed Networks
Authors: Matthew G. Reyes
Abstract: This paper addresses practical implementation of summing out, expanding, and reordering of messages in Local Conditioning (LC) for undirected networks. In particular, incoming messages conditioned on potentially different subsets of the receiving node's relevant set must be expanded to be conditioned on this relevant set, then reordered so that corresponding columns of the conditioned matrices can be fused through element-wise multiplication. An outgoing message is then reduced by summing out loop cutset nodes that are upstream of the outgoing edge. The emphasis on implementation is the primary contribution over the theoretical justification of LC given in Fay et al. Nevertheless, the complexity of Local Conditioning in grid networks is still no better than that of Clustering.
Submitted 6 December, 2018; originally announced December 2018.
Comments: This work was presented at the Future Technologies Conference (FTC), Vancouver, Canada, November 2018
Journal ref: Proceedings of the Future Technologies Conference (FTC) 2018, Eds. Kohei Arai, Rahul Bhatia, and Supriya Kapoor, Vol. 2, Springer
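The three message operations named in the abstract (expanding, reordering, summing out) can be sketched with dense numpy arrays: one axis per conditioned cutset node plus a final axis for the receiving node's state. Assuming k states per node and that each message's cutset is a subset of the target relevant set; function names are illustrative:

```python
import numpy as np

def expand(msg, msg_cutset, target_cutset, k):
    """Reorder a message's cutset axes into the target order, then expand it
    so it is conditioned on the full relevant set target_cutset. The last
    axis always indexes the receiving node's state."""
    order = [msg_cutset.index(c) for c in target_cutset if c in msg_cutset]
    msg = np.transpose(msg, order + [len(msg_cutset)])   # reorder cutset axes
    for pos, c in enumerate(target_cutset):
        if c not in msg_cutset:
            msg = np.expand_dims(msg, pos)               # new cutset axis
    return np.broadcast_to(msg, (k,) * len(target_cutset) + (k,))

def fuse(messages):
    """Element-wise product of expanded, reordered incoming messages."""
    out = messages[0].copy()
    for m in messages[1:]:
        out = out * m
    return out

def sum_out(msg, target_cutset, upstream):
    """Reduce an outgoing message by summing out cutset nodes that are
    upstream of the outgoing edge."""
    axes = tuple(i for i, c in enumerate(target_cutset) if c in upstream)
    return msg.sum(axis=axes)

k = 2
m1 = np.random.rand(k, k)   # conditioned on cutset ('a',), node state last
m2 = np.random.rand(k, k)   # conditioned on cutset ('b',)
e1 = expand(m1, ('a',), ('a', 'b'), k)
e2 = expand(m2, ('b',), ('a', 'b'), k)
out = sum_out(fuse([e1, e2]), ('a', 'b'), upstream={'a'})
print(out.shape)            # (k, k): remaining cutset 'b' x node state
```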
arXiv:1811.04907 [cs.CV, cs.LG] https://arxiv.org/abs/1811.04907
Title: Deep Learning versus Classical Regression for Brain Tumor Patient Survival Prediction
Authors: Yannick Suter, Alain Jungo, Michael Rebsamen, Urspeter Knecht, Evelyn Herrmann, Roland Wiest, Mauricio Reyes
Abstract: Deep learning for regression tasks on medical imaging data has shown promising results. However, compared to other approaches, their power is strongly linked to the dataset size. In this study, we evaluate 3D-convolutional neural networks (CNNs) and classical regression methods with hand-crafted features for survival time regression of patients with high grade brain tumors. The tested CNNs for regression showed promising but unstable results. The best performing deep learning approach reached an accuracy of 51.5% on held-out samples of the training set. All tested deep learning experiments were outperformed by a Support Vector Classifier (SVC) using 30 radiomic features. The investigated features included intensity, shape, location and deep features. The method submitted to the BraTS 2018 survival prediction challenge is an ensemble of SVCs, which reached a cross-validated accuracy of 72.2% on the BraTS 2018 training set, 57.1% on the validation set, and 42.9% on the testing set. The results suggest that more training data is necessary for a stable performance of a CNN model for direct regression from magnetic resonance images, and that non-imaging clinical patient information is crucial along with imaging information.
Submitted 12 November, 2018; originally announced November 2018.
Comments: Contribution to The International Multimodal Brain Tumor Segmentation (BraTS) Challenge 2018, survival prediction task
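An SVC ensemble over a handful of radiomic features is quick to sketch with sklearn. The kernels, voting scheme, and synthetic stand-in features below are illustrative, not the authors' configuration:

```python
import numpy as np
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# X: (n_patients, 30) radiomic features (intensity, shape, location, deep);
# y: survival class (e.g. short / mid / long). Synthetic stand-ins here.
X, y = np.random.randn(100, 30), np.random.randint(0, 3, 100)

ensemble = VotingClassifier([
    (k, make_pipeline(StandardScaler(), SVC(kernel=k)))
    for k in ("linear", "rbf", "poly")])
print(cross_val_score(ensemble, X, y, cv=5).mean())
```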
href="/search/cs?searchtype=author&query=Fathallah-Shaykh%2C+H">Hassan Fathallah-Shaykh</a>, <a href="/search/cs?searchtype=author&query=Wiest%2C+R">Roland Wiest</a>, <a href="/search/cs?searchtype=author&query=Kirschke%2C+J">Jan Kirschke</a>, <a href="/search/cs?searchtype=author&query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/cs?searchtype=author&query=Colen%2C+R">Rivka Colen</a>, <a href="/search/cs?searchtype=author&query=Kotrotsou%2C+A">Aikaterini Kotrotsou</a>, <a href="/search/cs?searchtype=author&query=Lamontagne%2C+P">Pamela Lamontagne</a>, <a href="/search/cs?searchtype=author&query=Marcus%2C+D">Daniel Marcus</a>, <a href="/search/cs?searchtype=author&query=Milchenko%2C+M">Mikhail Milchenko</a> , et al. (402 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.02629v3-abstract-short" style="display: inline;"> Gliomas are the most common primary brain malignancies, with different degrees of aggressiveness, variable prognosis and various heterogeneous histologic sub-regions, i.e., peritumoral edematous/invaded tissue, necrotic core, active and non-enhancing core. This intrinsic heterogeneity is also portrayed in their radio-phenotype, as their sub-regions are depicted by varying intensity profiles dissem… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.02629v3-abstract-full').style.display = 'inline'; document.getElementById('1811.02629v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.02629v3-abstract-full" style="display: none;"> Gliomas are the most common primary brain malignancies, with different degrees of aggressiveness, variable prognosis and various heterogeneous histologic sub-regions, i.e., peritumoral edematous/invaded tissue, necrotic core, active and non-enhancing core. This intrinsic heterogeneity is also portrayed in their radio-phenotype, as their sub-regions are depicted by varying intensity profiles disseminated across multi-parametric magnetic resonance imaging (mpMRI) scans, reflecting varying biological properties. Their heterogeneous shape, extent, and location are some of the factors that make these tumors difficult to resect, and in some cases inoperable. The amount of resected tumor is a factor also considered in longitudinal scans, when evaluating the apparent tumor for potential diagnosis of progression. Furthermore, there is mounting evidence that accurate segmentation of the various tumor sub-regions can offer the basis for quantitative image analysis towards prediction of patient overall survival. This study assesses the state-of-the-art machine learning (ML) methods used for brain tumor image analysis in mpMRI scans, during the last seven instances of the International Brain Tumor Segmentation (BraTS) challenge, i.e., 2012-2018. Specifically, we focus on i) evaluating segmentations of the various glioma sub-regions in pre-operative mpMRI scans, ii) assessing potential tumor progression by virtue of longitudinal growth of tumor sub-regions, beyond use of the RECIST/RANO criteria, and iii) predicting the overall survival from pre-operative mpMRI scans of patients that underwent gross total resection. 
Finally, we investigate the challenge of identifying the best ML algorithms for each of these tasks, considering that apart from being diverse on each instance of the challenge, the multi-institutional mpMRI BraTS dataset has also been a continuously evolving/growing dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.02629v3-abstract-full').style.display = 'none'; document.getElementById('1811.02629v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The International Multimodal Brain Tumor Segmentation (BraTS) Challenge</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1809.09468">arXiv:1809.09468</a> <span> [<a href="https://arxiv.org/pdf/1809.09468">pdf</a>, <a href="https://arxiv.org/format/1809.09468">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-02628-8">10.1007/978-3-030-02628-8 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Automatic brain tumor grading from MRI data using convolutional neural networks and quality assessment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Pereira%2C+S">Sergio Pereira</a>, <a href="/search/cs?searchtype=author&query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&query=Alves%2C+V">Victor Alves</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a>, <a href="/search/cs?searchtype=author&query=Silva%2C+C+A">Carlos A. Silva</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1809.09468v1-abstract-short" style="display: inline;"> Glioblastoma Multiforme is a high grade, very aggressive, brain tumor, with patients having a poor prognosis. Lower grade gliomas are less aggressive, but they can evolve into higher grade tumors over time. Patient management and treatment can vary considerably with tumor grade, ranging from tumor resection followed by a combined radio- and chemotherapy to a "wait and see" approach. 
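BraTS segmentations are scored on composed sub-regions rather than on the raw labels. A small sketch of that evaluation step, assuming the challenge's standard label convention:

```python
import numpy as np

# BraTS label convention: 1 = necrotic/non-enhancing core, 2 = edema,
# 4 = enhancing tumor. Evaluation composes these into sub-regions.
SUBREGIONS = {"whole tumor": (1, 2, 4), "tumor core": (1, 4), "enhancing": (4,)}

def dice(a, b):
    inter = np.logical_and(a, b).sum()
    return 2.0 * inter / max(a.sum() + b.sum(), 1)

def brats_dice(pred_labels, gt_labels):
    """Per-sub-region Dice from integer label maps."""
    return {name: dice(np.isin(pred_labels, ids), np.isin(gt_labels, ids))
            for name, ids in SUBREGIONS.items()}

pred = np.random.choice([0, 1, 2, 4], size=(64, 64, 64))
gt = np.random.choice([0, 1, 2, 4], size=(64, 64, 64))
print(brats_dice(pred, gt))
```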
arXiv:1809.09468 [cs.CV, cs.AI] https://arxiv.org/abs/1809.09468
DOI: 10.1007/978-3-030-02628-8
Title: Automatic brain tumor grading from MRI data using convolutional neural networks and quality assessment
Authors: Sergio Pereira, Raphael Meier, Victor Alves, Mauricio Reyes, Carlos A. Silva
Abstract: Glioblastoma Multiforme is a high-grade, very aggressive brain tumor, and patients have a poor prognosis. Lower grade gliomas are less aggressive, but they can evolve into higher grade tumors over time. Patient management and treatment can vary considerably with tumor grade, ranging from tumor resection followed by a combined radio- and chemotherapy to a "wait and see" approach. Hence, tumor grading is important for adequate treatment planning and monitoring. The gold standard for tumor grading relies on histopathological diagnosis of biopsy specimens. However, this procedure is invasive, time consuming, and prone to sampling error. Given these disadvantages, automatic tumor grading from widely used MRI protocols would be clinically important, as a way to expedite treatment planning and assessment of tumor evolution. In this paper, we propose to use Convolutional Neural Networks for predicting tumor grade directly from imaging data. In this way, we overcome the need for expert annotations of regions of interest. We evaluate two prediction approaches: from the whole brain, and from an automatically defined tumor region. Finally, we employ interpretability methodologies as a quality assurance stage to check if the method is using image regions indicative of tumor grade for classification.
Submitted 25 September, 2018; originally announced September 2018.
Comments: Accepted and presented at iMIMIC - Workshop on Interpretability of Machine Intelligence in Medical Image Computing
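The interpretability-based quality assurance stage can be approximated with a gradient saliency check: how much of the classifier's saliency mass falls inside the tumor region. The model, mask, and score below are placeholders in that spirit, not the authors' method:

```python
import torch
import torch.nn as nn

def saliency_inside_tumor(model, volume, tumor_mask, target_class):
    """Fraction of input-gradient saliency that lies inside the tumor mask;
    a low value suggests the grade classifier attends to irrelevant regions."""
    x = volume.clone().requires_grad_(True)        # (1, 1, D, H, W)
    model(x)[0, target_class].backward()
    sal = x.grad.abs().squeeze()
    return (sal * tumor_mask).sum().item() / sal.sum().item()

# Tiny stand-in classifier and data, purely to make the check executable.
model = nn.Sequential(nn.Flatten(), nn.Linear(16 * 16 * 16, 2))
vol = torch.randn(1, 1, 16, 16, 16)
mask = (torch.rand(16, 16, 16) > 0.9).float()      # "tumor" voxels
print(saliency_inside_tumor(model, vol, mask, target_class=1))
```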
arXiv:1808.03274 [cs.CY] https://arxiv.org/abs/1808.03274
Title: Introducing Computer Science to High School Students through Logic Programming
Authors: Timothy T. Yuen, Maritza Reyes, Yuanlin Zhang
Abstract: This paper investigates how high school students in an introductory computer science course approach computing in the Logic Programming (LP) paradigm. This qualitative study shows how novice students operate within the LP paradigm while engaging in foundational computing concepts and skills: students are engaged in a cyclical process of abstraction, reasoning, and creating representations of their ideas in code while also being informed by the (procedural) requirements and the revision/debugging process. As these computing concepts and skills are also expected in traditional approaches to introductory K-12 CS courses, this paper asserts that LP is a viable paradigm choice for high school novices. This paper is under consideration in Theory and Practice of Logic Programming (TPLP).
Submitted 9 August, 2018; originally announced August 2018.
Comments: Under consideration in Theory and Practice of Logic Programming (TPLP). arXiv admin note: text overlap with arXiv:1706.09248
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.06356v2-abstract-full').style.display = 'none'; document.getElementById('1807.06356v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 July, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 July, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for Machine Learning for Medical Image Reconstruction (MLMIR) workshop at MICCAI 2018. The revision corrects Amaresha's last name and Section 2.1 (scanner type and flip angles)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.05473">arXiv:1806.05473</a> <span> [<a href="https://arxiv.org/pdf/1806.05473">pdf</a>, <a href="https://arxiv.org/format/1806.05473">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Efficient Active Learning for Image Classification and Segmentation using a Sample Selection and Conditional Generative Adversarial Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mahapatra%2C+D">Dwarikanath Mahapatra</a>, <a href="/search/cs?searchtype=author&query=Bozorgtabar%2C+B">Behzad Bozorgtabar</a>, <a href="/search/cs?searchtype=author&query=Thiran%2C+J">Jean-Philippe Thiran</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+M">Mauricio Reyes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.05473v4-abstract-short" style="display: inline;"> Training robust deep learning (DL) systems for medical image classification or segmentation is challenging due to limited images covering different disease types and severity. We propose an active learning (AL) framework to select most informative samples and add to the training data. We use conditional generative adversarial networks (cGANs) to generate realistic chest xray images with different… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.05473v4-abstract-full').style.display = 'inline'; document.getElementById('1806.05473v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1806.05473v4-abstract-full" style="display: none;"> Training robust deep learning (DL) systems for medical image classification or segmentation is challenging due to limited images covering different disease types and severity. We propose an active learning (AL) framework to select most informative samples and add to the training data. We use conditional generative adversarial networks (cGANs) to generate realistic chest xray images with different disease characteristics by conditioning its generation on a real image sample. Informative samples to add to the training set are identified using a Bayesian neural network. 
arXiv:1806.05473 [cs.CV] https://arxiv.org/abs/1806.05473
Title: Efficient Active Learning for Image Classification and Segmentation using a Sample Selection and Conditional Generative Adversarial Network
Authors: Dwarikanath Mahapatra, Behzad Bozorgtabar, Jean-Philippe Thiran, Mauricio Reyes
Abstract: Training robust deep learning (DL) systems for medical image classification or segmentation is challenging due to limited images covering different disease types and severity. We propose an active learning (AL) framework to select the most informative samples and add them to the training data. We use conditional generative adversarial networks (cGANs) to generate realistic chest X-ray images with different disease characteristics by conditioning its generation on a real image sample. Informative samples to add to the training set are identified using a Bayesian neural network. Experiments show our proposed AL framework is able to achieve state-of-the-art performance by using about 35% of the full dataset, thus saving significant time and effort over conventional methods.
Submitted 22 October, 2019; v1 submitted 14 June, 2018; originally announced June 2018.
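Conditional generation can be sketched as a label-conditioned generator; the toy architecture below only illustrates how a class label steers generation (the paper additionally conditions on a real image sample, which is omitted here):

```python
import torch
import torch.nn as nn

class ConditionalGenerator(nn.Module):
    """Minimal class-conditional generator in the cGAN spirit described
    above; layer sizes are illustrative."""
    def __init__(self, n_classes=2, z_dim=64, img_dim=32 * 32):
        super().__init__()
        self.embed = nn.Embedding(n_classes, z_dim)
        self.net = nn.Sequential(
            nn.Linear(2 * z_dim, 256), nn.ReLU(),
            nn.Linear(256, img_dim), nn.Tanh())

    def forward(self, z, labels):
        # Concatenating noise with the label embedding lets the label
        # control which disease class is synthesized.
        return self.net(torch.cat([z, self.embed(labels)], dim=1))

g = ConditionalGenerator()
fake = g(torch.randn(4, 64), torch.tensor([0, 1, 0, 1]))
print(fake.shape)  # (4, 1024) flattened synthetic images
```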
arXiv:1806.04413 [cs.CV] https://arxiv.org/abs/1806.04413
DOI: 10.1007/978-3-030-00931-1_13
Title: Enhancing clinical MRI Perfusion maps with data-driven maps of complementary nature for lesion outcome prediction
Authors: Adriano Pinto, Sergio Pereira, Raphael Meier, Victor Alves, Roland Wiest, Carlos A. Silva, Mauricio Reyes
Abstract: Stroke is the second most common cause of death in developed countries, where rapid clinical intervention can have a major impact on a patient's life. When deciding whether to perform the revascularization procedure, physicians weigh its risks and benefits based on multi-modal MRI and clinical experience. Therefore, automatic prediction of the ischemic stroke lesion outcome has the potential to assist the physician towards a better stroke assessment and information about tissue outcome. Typically, automatic methods consider the information of the standard kinetic models of diffusion and perfusion MRI (e.g. Tmax, TTP, MTT, rCBF, rCBV) to perform lesion outcome prediction. In this work, we propose a deep learning method to fuse this information with an automated data selection of the raw 4D PWI image information, followed by a data-driven deep-learning modeling of the underlying blood flow hemodynamics. We demonstrate the ability of the proposed approach to improve prediction of tissue at risk before therapy, as compared to only using the standard clinical perfusion maps, hence suggesting the potential benefits of the proposed data-driven raw perfusion data modelling approach.
Submitted 12 June, 2018; originally announced June 2018.
Comments: Accepted at MICCAI 2018
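The fusion idea can be sketched as a two-branch network: one branch encodes the standard clinical perfusion maps, another learns features directly from the raw 4D PWI time series (time as channels), and both are fused to predict the lesion outcome. Channel counts and depths below are illustrative only, not the paper's architecture:

```python
import torch
import torch.nn as nn

class PerfusionFusionNet(nn.Module):
    """Two-branch sketch: clinical maps (Tmax, TTP, MTT, rCBF, rCBV) in one
    branch, raw PWI timepoints in the other, fused into a lesion map."""
    def __init__(self, n_maps=5, n_timepoints=40):
        super().__init__()
        self.maps_branch = nn.Conv2d(n_maps, 16, 3, padding=1)
        self.pwi_branch = nn.Conv2d(n_timepoints, 16, 3, padding=1)
        self.head = nn.Conv2d(32, 1, 1)   # fused features -> lesion probability

    def forward(self, maps, pwi):
        f = torch.cat([torch.relu(self.maps_branch(maps)),
                       torch.relu(self.pwi_branch(pwi))], dim=1)
        return torch.sigmoid(self.head(f))

net = PerfusionFusionNet()
out = net(torch.randn(1, 5, 64, 64), torch.randn(1, 40, 64, 64))
print(out.shape)  # (1, 1, 64, 64)
```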
arXiv:1806.03106 [pdf, other] cs.CV
Uncertainty-driven Sanity Check: Application to Postoperative Brain Tumor Cavity Segmentation
Authors: Alain Jungo, Raphael Meier, Ekin Ermis, Evelyn Herrmann, Mauricio Reyes
Abstract: Uncertainty estimates of modern neural networks provide additional information alongside the computed predictions and are thus expected to improve the understanding of the underlying model. Reliable uncertainties are particularly interesting for safety-critical computer-assisted applications in medicine, e.g., neurosurgical interventions and radiotherapy planning. We propose an uncertainty-driven sanity check for the identification of segmentation results that need particular expert review. Our method uses a fully-convolutional neural network and computes uncertainty estimates by the principle of Monte Carlo dropout. We evaluate the performance of the proposed method on a clinical dataset of 30 postoperative brain tumor images. The method can segment the highly inhomogeneous resection cavities accurately (Dice coefficient 0.792 ± 0.154). Furthermore, the proposed sanity check is able to detect the worst segmentation and three out of the four outliers. The results highlight the potential of using the additional information from the model's parameter uncertainty to validate the segmentation performance of a deep learning model.
Submitted 8 June, 2018; originally announced June 2018.
Comments: Appears in Medical Imaging with Deep Learning (MIDL), 2018
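The Monte Carlo dropout principle this abstract builds on can be sketched in a few lines: keep dropout active at inference, run several stochastic forward passes, and use the spread of the predictions as an uncertainty score that flags cases for expert review. The toy model and threshold below are assumptions, not the paper's network:

```python
# Monte Carlo dropout sketch: the prediction variance across stochastic
# forward passes serves as a per-case "sanity check" score.
import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(),
    nn.Dropout2d(p=0.5),                    # remains stochastic at test time
    nn.Conv2d(8, 1, 3, padding=1), nn.Sigmoid(),
)

def mc_dropout_predict(model, x, n_samples=20):
    model.train()                            # .train() keeps dropout layers on
    with torch.no_grad():
        preds = torch.stack([model(x) for _ in range(n_samples)])
    return preds.mean(dim=0), preds.var(dim=0)

x = torch.randn(1, 1, 64, 64)                # one toy image
mean_seg, voxel_uncertainty = mc_dropout_predict(model, x)
case_score = voxel_uncertainty.mean().item() # aggregate to one score per case
if case_score > 0.01:                        # hypothetical review threshold
    print(f"flag for expert review (score={case_score:.4f})")
```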
arXiv:1806.02562 [pdf, other] cs.CV
On the Effect of Inter-observer Variability for a Reliable Estimation of Uncertainty of Medical Image Segmentation
Authors: Alain Jungo, Raphael Meier, Ekin Ermis, Marcela Blatti-Moreno, Evelyn Herrmann, Roland Wiest, Mauricio Reyes
Abstract: Uncertainty estimation methods are expected to improve the understanding and quality of computer-assisted methods used in medical applications (e.g., neurosurgical interventions, radiotherapy planning), where automated medical image segmentation is crucial. In supervised machine learning, a common practice for generating ground truth labels is to merge observer annotations. However, since many medical imaging tasks show high inter-observer variability resulting from factors such as image quality and differing levels of user expertise and domain knowledge, little is known about how inter-observer variability and commonly used fusion methods affect the estimation of uncertainty of automated image segmentation. In this paper we analyze the effect of common image label fusion techniques on uncertainty estimation, and propose to learn the uncertainty among observers. The results highlight the negative effect that fusion methods applied in deep learning have on obtaining reliable estimates of segmentation uncertainty. Additionally, we show that the learned observers' uncertainty can be combined with current standard Monte Carlo dropout Bayesian neural networks to characterize the uncertainty of the model's parameters.
Submitted 7 June, 2018; originally announced June 2018.
Comments: Appears in Medical Image Computing and Computer Assisted Interventions (MICCAI), 2018
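To make the label-fusion issue concrete, the toy example below contrasts majority voting, which erases inter-observer disagreement before training, with a simple soft label that preserves it. The tiny 1D "masks" are invented for illustration and merely stand in for real segmentations:

```python
# Majority-vote fusion vs. soft labels over three observer annotations.
import numpy as np

observers = np.array([        # 3 observers x 6 voxels, binary masks
    [1, 1, 1, 0, 0, 0],
    [1, 1, 0, 0, 0, 0],
    [1, 1, 1, 1, 0, 0],
])

majority = (observers.mean(axis=0) > 0.5).astype(int)  # common hard fusion
soft = observers.mean(axis=0)                          # keeps disagreement

print("majority vote:", majority)  # [1 1 1 0 0 0] -- disagreement erased
print("soft label   :", soft)      # ~[1 1 0.67 0.33 0 0] -- preserved
```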
arXiv:1706.09248 [pdf] cs.CY cs.AI
Logic Programming for an Introductory Computer Science Course for High School Students
Authors: Timothy Yuen, Maritz Reyes, Yuanlin Zhang
Abstract: This paper investigates how high school students approach computing through an introductory computer science course situated in the Logic Programming (LP) paradigm. This study shows how novice students operate within the LP paradigm while engaging in foundational computing concepts and skills, and presents a case for LP as a viable paradigm choice for introductory CS courses.
Submitted 20 June, 2017; originally announced June 2017.
Comments: Proceedings of the 2nd International Workshop on User-Oriented Logic Paradigms (IULP 2017), Editors: Claudia Schulz and Stefan Ellmauthaler