
Search | arXiv e-print repository

Showing 1–6 of 6 results for author: Al-Maskari, R

Searching in archive cs.

1. arXiv:2501.03880 [pdf, other] eess.IV cs.CV cs.LG
SELMA3D challenge: Self-supervised learning for 3D light-sheet microscopy image segmentation
Authors: Ying Chen, Rami Al-Maskari, Izabela Horvath, Mayar Ali, Luciano Hoher, Kaiyuan Yang, Zengming Lin, Zhiwei Zhai, Mengzhe Shen, Dejin Xun, Yi Wang, Tony Xu, Maged Goubran, Yunheng Wu, Kensaku Mori, Johannes C. Paetzold, Ali Erturk
Abstract: Recent innovations in light sheet microscopy, paired with developments in tissue clearing techniques, enable the 3D imaging of large mammalian tissues with cellular resolution. Combined with the progress in large-scale data analysis, driven by deep learning, these innovations empower researchers to rapidly investigate the morphological and functional properties of diverse biological samples. Segmentation, a crucial preliminary step in the analysis process, can be automated using domain-specific deep learning models with expert-level performance. However, these models exhibit high sensitivity to domain shifts, leading to a significant drop in accuracy when applied to data outside their training distribution. To address this limitation, and inspired by the recent success of self-supervised learning in training generalizable models, we organized the SELMA3D Challenge during the MICCAI 2024 conference. SELMA3D provides a vast collection of light-sheet images from cleared mouse and human brains, comprising 35 large 3D images (each with over 1000^3 voxels) and 315 annotated small patches for finetuning, preliminary testing, and final testing. The dataset encompasses diverse biological structures, including vessel-like and spot-like structures. Five teams participated in all phases of the challenge, and their proposed methods are reviewed in this paper. Quantitative and qualitative results from most participating teams demonstrate that self-supervised learning on large datasets improves segmentation model performance and generalization. We will continue to support and extend SELMA3D as an inaugural MICCAI challenge focused on self-supervised learning for 3D microscopy image segmentation.
Submitted 12 January, 2025; v1 submitted 7 January, 2025; originally announced January 2025.
Comments: 2nd version
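The recipe this challenge is built around (self-supervised pretraining on the large unlabeled volumes, then finetuning on the 315 annotated patches) can be illustrated with a minimal masked-reconstruction pretext task. This is a generic sketch, not any participating team's method; the tiny 3D autoencoder, patch size, and masking ratio below are all illustrative assumptions.

    import torch
    import torch.nn as nn

    class Tiny3DAutoencoder(nn.Module):
        """Illustrative 3D conv encoder/decoder; real entries use far larger backbones."""
        def __init__(self):
            super().__init__()
            self.encoder = nn.Sequential(
                nn.Conv3d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
                nn.Conv3d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
            )
            self.decoder = nn.Sequential(
                nn.ConvTranspose3d(32, 16, 4, stride=2, padding=1), nn.ReLU(),
                nn.ConvTranspose3d(16, 1, 4, stride=2, padding=1),
            )

        def forward(self, x):
            return self.decoder(self.encoder(x))

    def masked_reconstruction_step(model, volume, mask_ratio=0.6):
        """One self-supervised step: hide random voxels, reconstruct the patch."""
        mask = (torch.rand_like(volume) > mask_ratio).float()
        recon = model(volume * mask)
        # The loss is computed only on the hidden voxels.
        return ((recon - volume) ** 2 * (1 - mask)).mean()

    model = Tiny3DAutoencoder()
    patch = torch.randn(2, 1, 32, 32, 32)  # stand-in for light-sheet subvolumes
    masked_reconstruction_step(model, patch).backward()

After pretraining, the encoder would be reused in a segmentation network and finetuned on the annotated patches.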
2. arXiv:2312.17670 [pdf, other] cs.CV cs.LG q-bio.QM q-bio.TO
Benchmarking the CoW with the TopCoW Challenge: Topology-Aware Anatomical Segmentation of the Circle of Willis for CTA and MRA
Authors: Kaiyuan Yang, Fabio Musio, Yihui Ma, Norman Juchler, Johannes C. Paetzold, Rami Al-Maskari, Luciano Höher, Hongwei Bran Li, Ibrahim Ethem Hamamci, Anjany Sekuboyina, Suprosanna Shit, Houjing Huang, Chinmay Prabhakar, Ezequiel de la Rosa, Diana Waldmannstetter, Florian Kofler, Fernando Navarro, Martin Menten, Ivan Ezhov, Daniel Rueckert, Iris Vos, Ynte Ruigrok, Birgitta Velthuis, Hugo Kuijf, Julien Hämmerli, et al. (59 additional authors not shown)
Abstract: The Circle of Willis (CoW) is an important network of arteries connecting major circulations of the brain. Its vascular architecture is believed to affect the risk, severity, and clinical outcome of serious neurovascular diseases. However, characterizing the highly variable CoW anatomy is still a manual and time-consuming expert task. The CoW is usually imaged by two angiographic imaging modalities, magnetic resonance angiography (MRA) and computed tomography angiography (CTA), but there exist limited public datasets with annotations on CoW anatomy, especially for CTA. Therefore, we organized the TopCoW Challenge in 2023 with the release of an annotated CoW dataset. The TopCoW dataset was the first public dataset with voxel-level annotations for thirteen possible CoW vessel components, enabled by virtual-reality (VR) technology. It was also the first large dataset with paired MRA and CTA from the same patients. The TopCoW challenge formalized the CoW characterization problem as a multiclass anatomical segmentation task with an emphasis on topological metrics. We invited submissions worldwide for the CoW segmentation task, which attracted over 140 registered participants from four continents. The top-performing teams managed to segment many CoW components to Dice scores around 90%, but with lower scores for communicating arteries and rare variants. There were also topological mistakes for predictions with high Dice scores. Additional topological analysis revealed further areas for improvement in detecting certain CoW components and matching CoW variant topology accurately. TopCoW represented a first attempt at benchmarking the CoW anatomical segmentation task for MRA and CTA, both morphologically and topologically.
Submitted 29 April, 2024; v1 submitted 29 December, 2023; originally announced December 2023.
Comments: 24 pages, 11 figures, 9 tables. Summary paper for the MICCAI TopCoW 2023 Challenge
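The gap the abstract points at, predictions with high Dice scores that still contain topological mistakes, is easy to make concrete. The sketch below is a hedged illustration rather than the challenge's actual evaluation code: it computes per-component Dice alongside a crude topology check (connected-component counts per vessel class).

    import numpy as np
    from scipy import ndimage

    def dice(pred, gt):
        """Soerensen-Dice overlap between two binary masks."""
        inter = np.logical_and(pred, gt).sum()
        denom = pred.sum() + gt.sum()
        return 2.0 * inter / denom if denom else 1.0

    def evaluate_cow(pred_labels, gt_labels, n_classes=13):
        """Per-class Dice plus a crude topology check: a CoW vessel
        component should normally form a single connected piece."""
        report = {}
        for c in range(1, n_classes + 1):
            p, g = pred_labels == c, gt_labels == c
            _, pred_pieces = ndimage.label(p)
            _, gt_pieces = ndimage.label(g)
            report[c] = {"dice": dice(p, g),
                         "pred_pieces": pred_pieces, "gt_pieces": gt_pieces}
        return report

    # One dropped voxel splits a "vessel" in two: Dice stays near 0.97,
    # but the component count flags the topological mistake.
    gt = np.zeros((1, 30), dtype=int)
    gt[0, 5:25] = 1
    pred = gt.copy()
    pred[0, 15] = 0
    print(evaluate_cow(pred, gt, n_classes=1))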
3. arXiv:2312.02608 [pdf, other] cs.CV cs.AI cs.LG eess.IV
Panoptica -- instance-wise evaluation of 3D semantic and instance segmentation maps
Authors: Florian Kofler, Hendrik Möller, Josef A. Buchner, Ezequiel de la Rosa, Ivan Ezhov, Marcel Rosier, Isra Mekki, Suprosanna Shit, Moritz Negwer, Rami Al-Maskari, Ali Ertürk, Shankeeth Vinayahalingam, Fabian Isensee, Sarthak Pati, Daniel Rueckert, Jan S. Kirschke, Stefan K. Ehrlich, Annika Reinke, Bjoern Menze, Benedikt Wiestler, Marie Piraud
Abstract: This paper introduces panoptica, a versatile and performance-optimized package designed for computing instance-wise segmentation quality metrics from 2D and 3D segmentation maps. panoptica addresses the limitations of existing metrics and provides a modular framework that complements the original intersection-over-union-based panoptic quality with other metrics, such as the distance metric Average Symmetric Surface Distance. The package is open-source, implemented in Python, and accompanied by comprehensive documentation and tutorials. panoptica employs a three-step metrics computation process to cover diverse use cases. The efficacy of panoptica is demonstrated on various real-world biomedical datasets, where an instance-wise evaluation is instrumental for an accurate representation of the underlying clinical task. Overall, we envision panoptica as a valuable tool facilitating in-depth evaluation of segmentation methods.
Submitted 5 December, 2023; originally announced December 2023.
Comments: 15 pages, 6 figures, 3 tables
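panoptica's own API is not quoted in the abstract, so rather than guess at it, the sketch below implements the baseline metric the package generalizes: intersection-over-union-based panoptic quality, with reference and predicted instances matched one-to-one at IoU > 0.5. Function and variable names here are my own, not the package's.

    import numpy as np

    def panoptic_quality(pred_inst, gt_inst, iou_thresh=0.5):
        """IoU-based panoptic quality over instance maps (0 = background).
        PQ combines recognition quality (an F1-like term) with
        segmentation quality (mean IoU over matched instances)."""
        pred_ids = [i for i in np.unique(pred_inst) if i != 0]
        gt_ids = [i for i in np.unique(gt_inst) if i != 0]
        matched_ious, used = [], set()
        for g in gt_ids:
            g_mask = gt_inst == g
            best_iou, best_p = 0.0, None
            for p in pred_ids:
                if p in used:
                    continue
                inter = np.logical_and(g_mask, pred_inst == p).sum()
                union = np.logical_or(g_mask, pred_inst == p).sum()
                iou = inter / union if union else 0.0
                if iou > best_iou:
                    best_iou, best_p = iou, p
            if best_iou > iou_thresh:  # IoU > 0.5 makes the match unique
                matched_ious.append(best_iou)
                used.add(best_p)
        tp = len(matched_ious)
        fp, fn = len(pred_ids) - tp, len(gt_ids) - tp
        denom = tp + 0.5 * fp + 0.5 * fn
        return sum(matched_ious) / denom if denom else 1.0

Swapping the IoU term for a distance metric such as Average Symmetric Surface Distance, as the abstract describes, would only change how each matched pair is scored, not the matching itself.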
4. arXiv:2301.00243 [pdf, other] cs.LG cs.AI cs.CV
Approaching Peak Ground Truth
Authors: Florian Kofler, Johannes Wahle, Ivan Ezhov, Sophia Wagner, Rami Al-Maskari, Emilia Gryska, Mihail Todorov, Christina Bukas, Felix Meissen, Tingying Peng, Ali Ertürk, Daniel Rueckert, Rolf Heckemann, Jan Kirschke, Claus Zimmer, Benedikt Wiestler, Bjoern Menze, Marie Piraud
Abstract: Machine learning models are typically evaluated by computing similarity with reference annotations and trained by maximizing similarity with such annotations. Especially in the biomedical domain, annotations are subjective and suffer from low inter- and intra-rater reliability. Since annotations only reflect one interpretation of the real world, this can lead to sub-optimal predictions even though the model achieves high similarity scores. Here, the theoretical concept of Peak Ground Truth (PGT) is introduced. PGT marks the point beyond which an increase in similarity with the reference annotation stops translating to better real-world model performance (RWMP). Additionally, a quantitative technique to approximate PGT by computing inter- and intra-rater reliability is proposed. Finally, four categories of PGT-aware strategies to evaluate and improve model performance are reviewed.
Submitted 18 March, 2023; v1 submitted 31 December, 2022; originally announced January 2023.
Comments: 7 pages, 2 figures (minor corrections to text, affiliations, and layout)
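The proposed approximation rests on inter- and intra-rater reliability, and a minimal version of it for segmentation masks is mean pairwise Dice between annotators of the same case: once a model's test Dice reaches that ceiling, further gains against a single reference annotation are suspect. A hedged sketch, with a synthetic three-rater example standing in for real annotations:

    import numpy as np
    from itertools import combinations

    def dice(a, b):
        inter = np.logical_and(a, b).sum()
        denom = a.sum() + b.sum()
        return 2.0 * inter / denom if denom else 1.0

    def inter_rater_ceiling(annotations):
        """Mean pairwise Dice across raters of the same case. Under the
        paper's argument, a model whose test Dice approaches this ceiling
        may already be at Peak Ground Truth: further gains against one
        reference annotation need not improve real-world performance."""
        return float(np.mean([dice(a, b) for a, b in combinations(annotations, 2)]))

    # Hypothetical: three raters annotate the same lesion slightly differently.
    rng = np.random.default_rng(0)
    base = rng.random((64, 64)) > 0.7
    raters = [np.logical_xor(base, rng.random((64, 64)) > 0.97) for _ in range(3)]
    print(f"inter-rater Dice ceiling: {inter_rater_ceiling(raters):.3f}")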
5. arXiv:2205.08209 [pdf, other] cs.CV cs.LG eess.IV doi:10.1007/978-3-031-34048-2_58
blob loss: instance imbalance aware loss functions for semantic segmentation
Authors: Florian Kofler, Suprosanna Shit, Ivan Ezhov, Lucas Fidon, Izabela Horvath, Rami Al-Maskari, Hongwei Li, Harsharan Bhatia, Timo Loehr, Marie Piraud, Ali Erturk, Jan Kirschke, Jan C. Peeken, Tom Vercauteren, Claus Zimmer, Benedikt Wiestler, Bjoern Menze
Abstract: Deep convolutional neural networks (CNNs) have proven to be remarkably effective in semantic segmentation tasks. Most popular loss functions were introduced targeting improved volumetric scores, such as the Dice coefficient (DSC). By design, DSC can tackle class imbalance; however, it does not recognize instance imbalance within a class. As a result, a large foreground instance can dominate minor instances and still produce a satisfactory DSC. Nevertheless, detecting tiny instances is crucial for many applications, such as disease monitoring. For example, it is imperative to locate and surveil small-scale lesions in the follow-up of multiple sclerosis patients. We propose a novel family of loss functions, blob loss, primarily aimed at maximizing instance-level detection metrics, such as F1 score and sensitivity. Blob loss is designed for semantic segmentation problems where detecting multiple instances matters. We extensively evaluate a DSC-based blob loss in five complex 3D semantic segmentation tasks featuring pronounced instance heterogeneity in terms of texture and morphology. Compared to soft Dice loss, we achieve a 5% improvement for MS lesions, a 3% improvement for liver tumor, and an average 2% improvement for microscopy segmentation tasks in terms of F1 score.
Submitted 6 June, 2023; v1 submitted 17 May, 2022; originally announced May 2022.
Comments: 23 pages, 7 figures // corrected one mistake where it said beta instead of alpha in the text
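The abstract and the version note pin down the ingredients: a global Dice term and a per-instance ("blob") term, weighted by coefficients alpha and beta, where each blob is scored with the other blobs masked out so tiny lesions count as much as large ones. The following is a simplified sketch of that idea, not the paper's reference implementation; the masking is reduced to the essentials.

    import torch
    from scipy import ndimage

    def soft_dice_loss(pred, target, eps=1e-6):
        """1 - soft Dice over the given voxels."""
        inter = (pred * target).sum()
        return 1.0 - (2.0 * inter + eps) / (pred.sum() + target.sum() + eps)

    def blob_dice_loss(pred, target, alpha=1.0, beta=1.0):
        """Instance-imbalance-aware sketch: one Dice term per connected
        foreground component, computed with all other blobs excluded,
        then averaged and combined with the usual global term."""
        global_term = soft_dice_loss(pred, target)
        labels, n_blobs = ndimage.label(target.detach().cpu().numpy())
        if n_blobs == 0:
            return alpha * global_term
        labels = torch.from_numpy(labels).to(target.device)
        blob_terms = []
        for b in range(1, n_blobs + 1):
            keep = (labels == b) | (target < 0.5)  # hide the other blobs
            blob_terms.append(soft_dice_loss(pred[keep], (labels == b).float()[keep]))
        return alpha * global_term + beta * torch.stack(blob_terms).mean()

    # A tiny and a large lesion now contribute one blob term each.
    pred = torch.sigmoid(torch.randn(32, 32, requires_grad=True))
    target = torch.zeros(32, 32)
    target[2:6, 2:6] = 1.0      # tiny lesion
    target[20:30, 5:25] = 1.0   # large lesion
    blob_dice_loss(pred, target).backward()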
6. arXiv:2104.10993 [pdf, other] eess.IV cs.CV
METGAN: Generative Tumour Inpainting and Modality Synthesis in Light Sheet Microscopy
Authors: Izabela Horvath, Johannes C. Paetzold, Oliver Schoppe, Rami Al-Maskari, Ivan Ezhov, Suprosanna Shit, Hongwei Li, Ali Ertuerk, Bjoern H. Menze
Abstract: Novel multimodal imaging methods are capable of generating extensive, super-high-resolution datasets for preclinical research. Yet, a massive lack of annotations prevents the broad use of deep learning to analyze such data. So far, existing generative models fail to mitigate this problem because of frequent labeling errors. In this paper, we introduce a novel generative method which leverages real anatomical information to generate realistic image-label pairs of tumours. We construct a dual-pathway generator, for the anatomical image and label, trained in a cycle-consistent setup, constrained by an independent, pretrained segmentor. The generated images yield significant quantitative improvement compared to existing methods. To validate the quality of synthesis, we train segmentation networks on a dataset augmented with the synthetic data, substantially improving the segmentation over baseline.
Submitted 23 April, 2021; v1 submitted 22 April, 2021; originally announced April 2021.
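The abstract describes the training signal precisely enough to sketch its composition: a dual-pathway generator emits an (image, label) pair, a cycle pass must reconstruct the inputs, and an independent, frozen, pretrained segmentor must recover the generated label from the generated image. The stand-in networks and loss weights below are placeholders, and the adversarial GAN terms are omitted; only the loss wiring follows the abstract.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class DualPathGenerator(nn.Module):
        """Toy stand-in: one pathway for the anatomical image, one for the label."""
        def __init__(self):
            super().__init__()
            self.img_path = nn.Conv2d(2, 1, 3, padding=1)
            self.lbl_path = nn.Conv2d(2, 1, 3, padding=1)

        def forward(self, img, lbl):
            x = torch.cat([img, lbl], dim=1)
            return self.img_path(x), torch.sigmoid(self.lbl_path(x))

    def generator_loss(gen, segmentor, img, lbl, lambda_cyc=10.0, lambda_seg=1.0):
        fake_img, fake_lbl = gen(img, lbl)
        rec_img, rec_lbl = gen(fake_img, fake_lbl)  # cycle pass back towards the inputs
        cycle = F.l1_loss(rec_img, img) + F.l1_loss(rec_lbl, lbl)
        # The frozen segmentor constrains the synthetic image-label pair to agree.
        seg = F.l1_loss(torch.sigmoid(segmentor(fake_img)), fake_lbl)
        return lambda_cyc * cycle + lambda_seg * seg

    gen = DualPathGenerator()
    segmentor = nn.Conv2d(1, 1, 3, padding=1)  # placeholder for the pretrained segmentor
    for p in segmentor.parameters():
        p.requires_grad_(False)

    img = torch.randn(2, 1, 64, 64)
    lbl = (torch.rand(2, 1, 64, 64) > 0.8).float()
    generator_loss(gen, segmentor, img, lbl).backward()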
