<!-- Saved snapshot of "Search | arXiv e-print repository", retrieved via the CINXE.COM arXiv mirror. -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;8 of 8 results for author: <span class="mathjax">Baheti, B</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/eess" aria-role="search"> Searching in archive <strong>eess</strong>. <a href="/search/?searchtype=author&amp;query=Baheti%2C+B">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Baheti, B"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Baheti%2C+B&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Baheti, B"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.18435">arXiv:2405.18435</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.18435">pdf</a>, <a href="https://arxiv.org/format/2405.18435">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> QUBIQ: Uncertainty Quantification for Biomedical Image Segmentation Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Li%2C+H+B">Hongwei Bran Li</a>, <a href="/search/eess?searchtype=author&amp;query=Navarro%2C+F">Fernando Navarro</a>, <a href="/search/eess?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/eess?searchtype=author&amp;query=Bayat%2C+A">Amirhossein Bayat</a>, <a href="/search/eess?searchtype=author&amp;query=Das%2C+D">Dhritiman Das</a>, <a href="/search/eess?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a>, <a href="/search/eess?searchtype=author&amp;query=Shit%2C+S">Suprosanna Shit</a>, <a 
href="/search/eess?searchtype=author&amp;query=Waldmannstetter%2C+D">Diana Waldmannstetter</a>, <a href="/search/eess?searchtype=author&amp;query=Paetzold%2C+J+C">Johannes C. Paetzold</a>, <a href="/search/eess?searchtype=author&amp;query=Hu%2C+X">Xiaobin Hu</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Zimmer%2C+L">Lucas Zimmer</a>, <a href="/search/eess?searchtype=author&amp;query=Amiranashvili%2C+T">Tamaz Amiranashvili</a>, <a href="/search/eess?searchtype=author&amp;query=Prabhakar%2C+C">Chinmay Prabhakar</a>, <a href="/search/eess?searchtype=author&amp;query=Berger%2C+C">Christoph Berger</a>, <a href="/search/eess?searchtype=author&amp;query=Weidner%2C+J">Jonas Weidner</a>, <a href="/search/eess?searchtype=author&amp;query=Alonso-Basant%2C+M">Michelle Alonso-Basant</a>, <a href="/search/eess?searchtype=author&amp;query=Rashid%2C+A">Arif Rashid</a>, <a href="/search/eess?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/eess?searchtype=author&amp;query=Adel%2C+W">Wesam Adel</a>, <a href="/search/eess?searchtype=author&amp;query=Ali%2C+D">Deniz Ali</a>, <a href="/search/eess?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/eess?searchtype=author&amp;query=Bai%2C+Y">Yingbin Bai</a>, <a href="/search/eess?searchtype=author&amp;query=Bhatt%2C+I">Ishaan Bhatt</a>, <a href="/search/eess?searchtype=author&amp;query=Cetindag%2C+S+C">Sabri Can Cetindag</a> , et al. 
(55 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.18435v2-abstract-short" style="display: inline;"> Uncertainty in medical image segmentation tasks, especially inter-rater variability, arising from differences in interpretations and annotations by various experts, presents a significant challenge in achieving consistent and reliable image segmentation. This variability not only reflects the inherent complexity and subjective nature of medical image interpretation but also directly impacts the de&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.18435v2-abstract-full').style.display = 'inline'; document.getElementById('2405.18435v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.18435v2-abstract-full" style="display: none;"> Uncertainty in medical image segmentation tasks, especially inter-rater variability, arising from differences in interpretations and annotations by various experts, presents a significant challenge in achieving consistent and reliable image segmentation. This variability not only reflects the inherent complexity and subjective nature of medical image interpretation but also directly impacts the development and evaluation of automated segmentation algorithms. Accurately modeling and quantifying this variability is essential for enhancing the robustness and clinical applicability of these algorithms. We report the set-up and summarize the benchmark results of the Quantification of Uncertainties in Biomedical Image Quantification Challenge (QUBIQ), which was organized in conjunction with International Conferences on Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2020 and 2021. 
The challenge focuses on the uncertainty quantification of medical image segmentation which considers the omnipresence of inter-rater variability in imaging datasets. The large collection of images with multi-rater annotations features various modalities such as MRI and CT; various organs such as the brain, prostate, kidney, and pancreas; and different image dimensions 2D-vs-3D. A total of 24 teams submitted different solutions to the problem, combining various baseline models, Bayesian neural networks, and ensemble model techniques. The obtained results indicate the importance of the ensemble models, as well as the need for further research to develop efficient 3D methods for uncertainty quantification methods in 3D segmentation tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.18435v2-abstract-full').style.display = 'none'; document.getElementById('2405.18435v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">initial technical report</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.01318">arXiv:2308.01318</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.01318">pdf</a>, <a href="https://arxiv.org/format/2308.01318">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Framing image registration as a landmark detection problem for label-noise-aware task representation (HitR) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Waldmannstetter%2C+D">Diana Waldmannstetter</a>, <a href="/search/eess?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Campi%2C+F">Francesco Campi</a>, <a href="/search/eess?searchtype=author&amp;query=Kukuljan%2C+I">Ivan Kukuljan</a>, <a href="/search/eess?searchtype=author&amp;query=Ehrlich%2C+S">Stefan Ehrlich</a>, <a href="/search/eess?searchtype=author&amp;query=Vinayahalingam%2C+S">Shankeeth Vinayahalingam</a>, <a href="/search/eess?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/eess?searchtype=author&amp;query=Chakrabarty%2C+S">Satrajit Chakrabarty</a>, <a href="/search/eess?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a 
href="/search/eess?searchtype=author&amp;query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/eess?searchtype=author&amp;query=Schwarting%2C+J">Julian Schwarting</a>, <a href="/search/eess?searchtype=author&amp;query=Metz%2C+M">Marie Metz</a>, <a href="/search/eess?searchtype=author&amp;query=Kirschke%2C+J+S">Jan S. Kirschke</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Heckemann%2C+R+A">Rolf A. Heckemann</a>, <a href="/search/eess?searchtype=author&amp;query=Piraud%2C+M">Marie Piraud</a>, <a href="/search/eess?searchtype=author&amp;query=Menze%2C+B+H">Bjoern H. Menze</a>, <a href="/search/eess?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.01318v2-abstract-short" style="display: inline;"> Accurate image registration is pivotal in biomedical image analysis, where selecting suitable registration algorithms demands careful consideration. While numerous algorithms are available, the evaluation metrics to assess their performance have remained relatively static. This study addresses this challenge by introducing a novel evaluation metric termed Landmark Hit Rate (HitR), which focuses on&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.01318v2-abstract-full').style.display = 'inline'; document.getElementById('2308.01318v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.01318v2-abstract-full" style="display: none;"> Accurate image registration is pivotal in biomedical image analysis, where selecting suitable registration algorithms demands careful consideration. 
While numerous algorithms are available, the evaluation metrics to assess their performance have remained relatively static. This study addresses this challenge by introducing a novel evaluation metric termed Landmark Hit Rate (HitR), which focuses on the clinical relevance of image registration accuracy. Unlike traditional metrics such as Target Registration Error, which emphasize subresolution differences, HitR considers whether registration algorithms successfully position landmarks within defined confidence zones. This paradigm shift acknowledges the inherent annotation noise in medical images, allowing for more meaningful assessments. To equip HitR with label-noise-awareness, we propose defining these confidence zones based on an Inter-rater Variance analysis. Consequently, hit rate curves are computed for varying landmark zone sizes, enabling performance measurement for a task-specific level of accuracy. Our approach offers a more realistic and meaningful assessment of image registration algorithms, reflecting their suitability for clinical and biomedical applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.01318v2-abstract-full').style.display = 'none'; document.getElementById('2308.01318v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.18164">arXiv:2305.18164</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.18164">pdf</a>, <a href="https://arxiv.org/format/2305.18164">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Generative Adversarial Networks based Skin Lesion Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Innani%2C+S">Shubham Innani</a>, <a href="/search/eess?searchtype=author&amp;query=Dutande%2C+P">Prasad Dutande</a>, <a href="/search/eess?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/eess?searchtype=author&amp;query=Pokuri%2C+V">Venu Pokuri</a>, <a href="/search/eess?searchtype=author&amp;query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/eess?searchtype=author&amp;query=Talbar%2C+S">Sanjay Talbar</a>, <a href="/search/eess?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/eess?searchtype=author&amp;query=Guntuku%2C+S+C">Sharath Chandra Guntuku</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.18164v2-abstract-short" style="display: inline;"> Skin cancer is a serious condition that requires accurate diagnosis and treatment. One way to assist clinicians in this task is using computer-aided diagnosis (CAD) tools that automatically segment skin lesions from dermoscopic images. 
We propose a novel adversarial learning-based framework called Efficient-GAN (EGAN) that uses an unsupervised generative network to generate accurate lesion masks.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.18164v2-abstract-full').style.display = 'inline'; document.getElementById('2305.18164v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.18164v2-abstract-full" style="display: none;"> Skin cancer is a serious condition that requires accurate diagnosis and treatment. One way to assist clinicians in this task is using computer-aided diagnosis (CAD) tools that automatically segment skin lesions from dermoscopic images. We propose a novel adversarial learning-based framework called Efficient-GAN (EGAN) that uses an unsupervised generative network to generate accurate lesion masks. It consists of a generator module with a top-down squeeze excitation-based compound scaled path, an asymmetric lateral connection-based bottom-up path, and a discriminator module that distinguishes between original and synthetic masks. A morphology-based smoothing loss is also implemented to encourage the network to create smooth semantic boundaries of lesions. The framework is evaluated on the International Skin Imaging Collaboration (ISIC) Lesion Dataset 2018. It outperforms the current state-of-the-art skin lesion segmentation approaches with a Dice coefficient, Jaccard similarity, and Accuracy of 90.1%, 83.6%, and 94.5%, respectively. We also design a lightweight segmentation framework (MGAN) that achieves comparable performance as EGAN but with an order of magnitude lower number of training parameters, thus resulting in faster inference times for low compute resource settings. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.18164v2-abstract-full').style.display = 'none'; document.getElementById('2305.18164v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in Nature Scientific Reports</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.01601">arXiv:2304.01601</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2304.01601">pdf</a>, <a href="https://arxiv.org/format/2304.01601">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Primitive Simultaneous Optimization of Similarity Metrics for Image Registration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Waldmannstetter%2C+D">Diana Waldmannstetter</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Schwarting%2C+J">Julian Schwarting</a>, <a href="/search/eess?searchtype=author&amp;query=Ezhov%2C+I">Ivan Ezhov</a>, <a href="/search/eess?searchtype=author&amp;query=Metz%2C+M">Marie Metz</a>, <a 
href="/search/eess?searchtype=author&amp;query=Bakas%2C+S">Spyridon Bakas</a>, <a href="/search/eess?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/eess?searchtype=author&amp;query=Chakrabarty%2C+S">Satrajit Chakrabarty</a>, <a href="/search/eess?searchtype=author&amp;query=Rueckert%2C+D">Daniel Rueckert</a>, <a href="/search/eess?searchtype=author&amp;query=Kirschke%2C+J+S">Jan S. Kirschke</a>, <a href="/search/eess?searchtype=author&amp;query=Heckemann%2C+R+A">Rolf A. Heckemann</a>, <a href="/search/eess?searchtype=author&amp;query=Piraud%2C+M">Marie Piraud</a>, <a href="/search/eess?searchtype=author&amp;query=Menze%2C+B+H">Bjoern H. Menze</a>, <a href="/search/eess?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.01601v3-abstract-short" style="display: inline;"> Even though simultaneous optimization of similarity metrics is a standard procedure in the field of semantic segmentation, surprisingly, this is much less established for image registration. To help closing this gap in the literature, we investigate in a complex multi-modal 3D setting whether simultaneous optimization of registration metrics, here implemented by means of primitive summation, can b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.01601v3-abstract-full').style.display = 'inline'; document.getElementById('2304.01601v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.01601v3-abstract-full" style="display: none;"> Even though simultaneous optimization of similarity metrics is a standard procedure in the field of semantic segmentation, surprisingly, this is much less established for image registration. 
To help closing this gap in the literature, we investigate in a complex multi-modal 3D setting whether simultaneous optimization of registration metrics, here implemented by means of primitive summation, can benefit image registration. We evaluate two challenging datasets containing collections of pre- to post-operative and pre- to intra-operative MR images of glioma. Employing the proposed optimization, we demonstrate improved registration accuracy in terms of TRE on expert neuroradiologists&#39; landmark annotations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.01601v3-abstract-full').style.display = 'none'; document.getElementById('2304.01601v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.00669">arXiv:2302.00669</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2302.00669">pdf</a>, <a href="https://arxiv.org/ps/2302.00669">ps</a>, <a href="https://arxiv.org/format/2302.00669">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Detecting Histologic &amp; Clinical Glioblastoma Patterns of Prognostic Relevance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/eess?searchtype=author&amp;query=Rai%2C+S">Sunny Rai</a>, <a href="/search/eess?searchtype=author&amp;query=Innani%2C+S">Shubham Innani</a>, <a href="/search/eess?searchtype=author&amp;query=Mehdiratta%2C+G">Garv Mehdiratta</a>, <a href="/search/eess?searchtype=author&amp;query=Guntuku%2C+S+C">Sharath Chandra Guntuku</a>, <a href="/search/eess?searchtype=author&amp;query=Nasrallah%2C+M+P">MacLean P. Nasrallah</a>, <a href="/search/eess?searchtype=author&amp;query=Bakas%2C+S">Spyridon Bakas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.00669v2-abstract-short" style="display: inline;"> Glioblastoma is the most common and aggressive malignant adult tumor of the central nervous system, with a grim prognosis and heterogeneous morphologic and molecular profiles. Since adopting the current standard-of-care treatment 18 years ago, no substantial prognostic improvement has been noticed. 
Accurate prediction of patient overall survival (OS) from histopathology whole slide images (WSI) in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.00669v2-abstract-full').style.display = 'inline'; document.getElementById('2302.00669v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.00669v2-abstract-full" style="display: none;"> Glioblastoma is the most common and aggressive malignant adult tumor of the central nervous system, with a grim prognosis and heterogeneous morphologic and molecular profiles. Since adopting the current standard-of-care treatment 18 years ago, no substantial prognostic improvement has been noticed. Accurate prediction of patient overall survival (OS) from histopathology whole slide images (WSI) integrated with clinical data using advanced computational methods could optimize clinical decision-making and patient management. Here, we focus on identifying prognostically relevant glioblastoma characteristics from H&amp;E stained WSI &amp; clinical data relating to OS. The exact approach for WSI capitalizes on the comprehensive curation of apparent artifactual content and an interpretability mechanism via a weakly supervised attention-based multiple-instance learning algorithm that further utilizes clustering to constrain the search space. The automatically placed patterns of high diagnostic value classify each WSI as representative of short or long-survivors. Further assessment of the prognostic relevance of the associated clinical patient data is performed both in isolation and in an integrated manner, using XGBoost and SHapley Additive exPlanations (SHAP). 
Identifying tumor morphological &amp; clinical patterns associated with short and long OS will enable the clinical neuropathologist to provide additional relevant prognostic information to the treating team and suggest avenues of biological investigation for understanding and potentially treating glioblastoma. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.00669v2-abstract-full').style.display = 'none'; document.getElementById('2302.00669v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.06226">arXiv:2301.06226</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2301.06226">pdf</a>, <a href="https://arxiv.org/format/2301.06226">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning based Novel Cascaded Approach for Skin Lesion Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Innani%2C+S">Shubham Innani</a>, <a href="/search/eess?searchtype=author&amp;query=Dutande%2C+P">Prasad Dutande</a>, <a href="/search/eess?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/eess?searchtype=author&amp;query=Baid%2C+U">Ujjwal Baid</a>, <a 
href="/search/eess?searchtype=author&amp;query=Talbar%2C+S">Sanjay Talbar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.06226v1-abstract-short" style="display: inline;"> Automatic lesion analysis is critical in skin cancer diagnosis and ensures effective treatment. The computer aided diagnosis of such skin cancer in dermoscopic images can significantly reduce the clinicians workload and help improve diagnostic accuracy. Although researchers are working extensively to address this problem, early detection and accurate identification of skin lesions remain challengi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.06226v1-abstract-full').style.display = 'inline'; document.getElementById('2301.06226v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.06226v1-abstract-full" style="display: none;"> Automatic lesion analysis is critical in skin cancer diagnosis and ensures effective treatment. The computer aided diagnosis of such skin cancer in dermoscopic images can significantly reduce the clinicians workload and help improve diagnostic accuracy. Although researchers are working extensively to address this problem, early detection and accurate identification of skin lesions remain challenging. This research focuses on a two step framework for skin lesion segmentation followed by classification for lesion analysis. We explored the effectiveness of deep convolutional neural network based architectures by designing an encoder-decoder architecture for skin lesion segmentation and CNN based classification network. The proposed approaches are evaluated quantitatively in terms of the Accuracy, mean Intersection over Union and Dice Similarity Coefficient. 
Our cascaded end to end deep learning based approach is the first of its kind, where the classification accuracy of the lesion is significantly improved because of prior segmentation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.06226v1-abstract-full').style.display = 'none'; document.getElementById('2301.06226v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to be published in 7th International Conference, CVIP 2022, Nagpur, India November 04-06, 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.06979">arXiv:2112.06979</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.06979">pdf</a>, <a href="https://arxiv.org/format/2112.06979">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> The Brain Tumor Sequence Registration (BraTS-Reg) Challenge: Establishing Correspondence Between Pre-Operative and Follow-up MRI Scans of Diffuse Glioma Patients </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Baheti%2C+B">Bhakti Baheti</a>, <a href="/search/eess?searchtype=author&amp;query=Chakrabarty%2C+S">Satrajit Chakrabarty</a>, <a 
href="/search/eess?searchtype=author&amp;query=Akbari%2C+H">Hamed Akbari</a>, <a href="/search/eess?searchtype=author&amp;query=Bilello%2C+M">Michel Bilello</a>, <a href="/search/eess?searchtype=author&amp;query=Wiestler%2C+B">Benedikt Wiestler</a>, <a href="/search/eess?searchtype=author&amp;query=Schwarting%2C+J">Julian Schwarting</a>, <a href="/search/eess?searchtype=author&amp;query=Calabrese%2C+E">Evan Calabrese</a>, <a href="/search/eess?searchtype=author&amp;query=Rudie%2C+J">Jeffrey Rudie</a>, <a href="/search/eess?searchtype=author&amp;query=Abidi%2C+S">Syed Abidi</a>, <a href="/search/eess?searchtype=author&amp;query=Mousa%2C+M">Mina Mousa</a>, <a href="/search/eess?searchtype=author&amp;query=Villanueva-Meyer%2C+J">Javier Villanueva-Meyer</a>, <a href="/search/eess?searchtype=author&amp;query=Fields%2C+B+K+K">Brandon K. K. Fields</a>, <a href="/search/eess?searchtype=author&amp;query=Kofler%2C+F">Florian Kofler</a>, <a href="/search/eess?searchtype=author&amp;query=Shinohara%2C+R+T">Russell Takeshi Shinohara</a>, <a href="/search/eess?searchtype=author&amp;query=Iglesias%2C+J+E">Juan Eugenio Iglesias</a>, <a href="/search/eess?searchtype=author&amp;query=Mok%2C+T+C+W">Tony C. W. Mok</a>, <a href="/search/eess?searchtype=author&amp;query=Chung%2C+A+C+S">Albert C. S. Chung</a>, <a href="/search/eess?searchtype=author&amp;query=Wodzinski%2C+M">Marek Wodzinski</a>, <a href="/search/eess?searchtype=author&amp;query=Jurgas%2C+A">Artur Jurgas</a>, <a href="/search/eess?searchtype=author&amp;query=Marini%2C+N">Niccolo Marini</a>, <a href="/search/eess?searchtype=author&amp;query=Atzori%2C+M">Manfredo Atzori</a>, <a href="/search/eess?searchtype=author&amp;query=Muller%2C+H">Henning Muller</a>, <a href="/search/eess?searchtype=author&amp;query=Grobroehmer%2C+C">Christoph Grobroehmer</a>, <a href="/search/eess?searchtype=author&amp;query=Siebert%2C+H">Hanna Siebert</a>, <a href="/search/eess?searchtype=author&amp;query=Hansen%2C+L">Lasse Hansen</a> , et al. 
(48 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.06979v2-abstract-short" style="display: inline;"> Registration of longitudinal brain MRI scans containing pathologies is challenging due to dramatic changes in tissue appearance. Although there has been progress in developing general-purpose medical image registration techniques, they have not yet attained the requisite precision and reliability for this task, highlighting its inherent complexity. Here we describe the Brain Tumor Sequence Registr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.06979v2-abstract-full').style.display = 'inline'; document.getElementById('2112.06979v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.06979v2-abstract-full" style="display: none;"> Registration of longitudinal brain MRI scans containing pathologies is challenging due to dramatic changes in tissue appearance. Although there has been progress in developing general-purpose medical image registration techniques, they have not yet attained the requisite precision and reliability for this task, highlighting its inherent complexity. Here we describe the Brain Tumor Sequence Registration (BraTS-Reg) challenge, as the first public benchmark environment for deformable registration algorithms focusing on estimating correspondences between pre-operative and follow-up scans of the same patient diagnosed with a diffuse brain glioma. The BraTS-Reg data comprise de-identified multi-institutional multi-parametric MRI (mpMRI) scans, curated for size and resolution according to a canonical anatomical template, and divided into training, validation, and testing sets. 
Clinical experts annotated ground truth (GT) landmark points of anatomical locations distinct across the temporal domain. Quantitative evaluation and ranking were based on the Median Euclidean Error (MEE), Robustness, and the determinant of the Jacobian of the displacement field. The top-ranked methodologies yielded similar performance across all evaluation metrics and shared several methodological commonalities, including pre-alignment, deep neural networks, inverse consistency analysis, and test-time instance optimization on a per-case basis as a post-processing step. The top-ranked method attained the MEE at or below that of the inter-rater variability for approximately 60% of the evaluated landmarks, underscoring the scope for further accuracy and robustness improvements, especially relative to human experts. The aim of BraTS-Reg is to continue to serve as an active resource for research, with the data and online evaluation tools accessible at https://bratsreg.github.io/. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.06979v2-abstract-full').style.display = 'none'; document.getElementById('2112.06979v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.09754">arXiv:2004.09754</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2004.09754">pdf</a>, <a href="https://arxiv.org/format/2004.09754">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> The 1st Agriculture-Vision Challenge: Methods and Results </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Chiu%2C+M+T">Mang Tik Chiu</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+X">Xingqian Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+K">Kai Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Hobbs%2C+J">Jennifer Hobbs</a>, <a href="/search/eess?searchtype=author&amp;query=Hovakimyan%2C+N">Naira Hovakimyan</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+T+S">Thomas S. 
Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Shi%2C+H">Honghui Shi</a>, <a href="/search/eess?searchtype=author&amp;query=Wei%2C+Y">Yunchao Wei</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+Z">Zilong Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Schwing%2C+A">Alexander Schwing</a>, <a href="/search/eess?searchtype=author&amp;query=Brunner%2C+R">Robert Brunner</a>, <a href="/search/eess?searchtype=author&amp;query=Dozier%2C+I">Ivan Dozier</a>, <a href="/search/eess?searchtype=author&amp;query=Dozier%2C+W">Wyatt Dozier</a>, <a href="/search/eess?searchtype=author&amp;query=Ghandilyan%2C+K">Karen Ghandilyan</a>, <a href="/search/eess?searchtype=author&amp;query=Wilson%2C+D">David Wilson</a>, <a href="/search/eess?searchtype=author&amp;query=Park%2C+H">Hyunseong Park</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+J">Junhee Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+S">Sungho Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Q">Qinghui Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Kampffmeyer%2C+M+C">Michael C. Kampffmeyer</a>, <a href="/search/eess?searchtype=author&amp;query=Jenssen%2C+R">Robert Jenssen</a>, <a href="/search/eess?searchtype=author&amp;query=Salberg%2C+A+B">Arnt B. Salberg</a>, <a href="/search/eess?searchtype=author&amp;query=Barbosa%2C+A">Alexandre Barbosa</a>, <a href="/search/eess?searchtype=author&amp;query=Trevisan%2C+R">Rodrigo Trevisan</a>, <a href="/search/eess?searchtype=author&amp;query=Zhao%2C+B">Bingchen Zhao</a> , et al. 
(17 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.09754v2-abstract-short" style="display: inline;"> The first Agriculture-Vision Challenge aims to encourage research in developing novel and effective algorithms for agricultural pattern recognition from aerial images, especially for the semantic segmentation task associated with our challenge dataset. Around 57 participating teams from various countries compete to achieve state-of-the-art in aerial agriculture semantic segmentation. The Agricultu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.09754v2-abstract-full').style.display = 'inline'; document.getElementById('2004.09754v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.09754v2-abstract-full" style="display: none;"> The first Agriculture-Vision Challenge aims to encourage research in developing novel and effective algorithms for agricultural pattern recognition from aerial images, especially for the semantic segmentation task associated with our challenge dataset. Around 57 participating teams from various countries compete to achieve state-of-the-art in aerial agriculture semantic segmentation. The Agriculture-Vision Challenge Dataset was employed, which comprises 21,061 aerial and multi-spectral farmland images. This paper provides a summary of notable methods and results in the challenge. Our submission server and leaderboard will continue to be open for researchers that are interested in this challenge dataset and task; the link can be found here. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.09754v2-abstract-full').style.display = 'none'; document.getElementById('2004.09754v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CVPR 2020 Workshop</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 
50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 
102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10