CINXE.COM
Search | arXiv e-print repository
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–32 of 32 results for author: <span class="mathjax">Stegmaier, J</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Stegmaier%2C+J">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Stegmaier, J"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Stegmaier%2C+J&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Stegmaier, J"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11515">arXiv:2411.11515</a> <span> [<a href="https://arxiv.org/pdf/2411.11515">pdf</a>, <a href="https://arxiv.org/format/2411.11515">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Cascaded Diffusion Models for 2D and 3D Microscopy Image Synthesis to Enhance Cell Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yilmaz%2C+R">R眉veyda Yilmaz</a>, <a href="/search/cs?searchtype=author&query=Keven%2C+K">Kaan Keven</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yuli Wu</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11515v2-abstract-short" style="display: inline;"> Automated cell segmentation in microscopy images is essential for biomedical research, yet conventional methods are labor-intensive and prone to error. While deep learning-based approaches have proven effective, they often require large annotated datasets, which are scarce due to the challenges of manual annotation. To overcome this, we propose a novel framework for synthesizing densely annotated… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11515v2-abstract-full').style.display = 'inline'; document.getElementById('2411.11515v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11515v2-abstract-full" style="display: none;"> Automated cell segmentation in microscopy images is essential for biomedical research, yet conventional methods are labor-intensive and prone to error. While deep learning-based approaches have proven effective, they often require large annotated datasets, which are scarce due to the challenges of manual annotation. To overcome this, we propose a novel framework for synthesizing densely annotated 2D and 3D cell microscopy images using cascaded diffusion models. Our method synthesizes 2D and 3D cell masks from sparse 2D annotations using multi-level diffusion models and NeuS, a 3D surface reconstruction approach. Following that, a pretrained 2D Stable Diffusion model is finetuned to generate realistic cell textures and the final outputs are combined to form cell populations. We show that training a segmentation model with a combination of our synthetic data and real data improves cell segmentation performance by up to 9\% across multiple datasets. Additionally, the FID scores indicate that the synthetic data closely resembles real data. The code for our proposed approach will be available at https://github.com/ruveydayilmaz0/cascaded_diffusion. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11515v2-abstract-full').style.display = 'none'; document.getElementById('2411.11515v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.11688">arXiv:2410.11688</a> <span> [<a href="https://arxiv.org/pdf/2410.11688">pdf</a>, <a href="https://arxiv.org/format/2410.11688">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> Visual Fixation-Based Retinal Prosthetic Simulation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yuli Wu</a>, <a href="/search/cs?searchtype=author&query=Nguyen%2C+D+D+T">Do Dinh Tan Nguyen</a>, <a href="/search/cs?searchtype=author&query=Konermann%2C+H">Henning Konermann</a>, <a href="/search/cs?searchtype=author&query=Yilmaz%2C+R">R眉veyda Yilmaz</a>, <a href="/search/cs?searchtype=author&query=Walter%2C+P">Peter Walter</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.11688v1-abstract-short" style="display: inline;"> This study proposes a retinal prosthetic simulation framework driven by visual fixations, inspired by the saccade mechanism, and assesses performance improvements through end-to-end optimization in a classification task. Salient patches are predicted from input images using the self-attention map of a vision transformer to mimic visual fixations. These patches are then encoded by a trainable U-Net… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11688v1-abstract-full').style.display = 'inline'; document.getElementById('2410.11688v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.11688v1-abstract-full" style="display: none;"> This study proposes a retinal prosthetic simulation framework driven by visual fixations, inspired by the saccade mechanism, and assesses performance improvements through end-to-end optimization in a classification task. Salient patches are predicted from input images using the self-attention map of a vision transformer to mimic visual fixations. These patches are then encoded by a trainable U-Net and simulated using the pulse2percept framework to predict visual percepts. By incorporating a learnable encoder, we aim to optimize the visual information transmitted to the retinal implant, addressing both the limited resolution of the electrode array and the distortion between the input stimuli and resulting phosphenes. The predicted percepts are evaluated using the self-supervised DINOv2 foundation model, with an optional learnable linear layer for classification accuracy. On a subset of the ImageNet validation set, the fixation-based framework achieves a classification accuracy of 87.72%, using computational parameters based on a real subject's physiological data, significantly outperforming the downsampling-based accuracy of 40.59% and approaching the healthy upper bound of 92.76%. Our approach shows promising potential for producing more semantically understandable percepts with the limited resolution available in retinal prosthetics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11688v1-abstract-full').style.display = 'none'; document.getElementById('2410.11688v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.17808">arXiv:2403.17808</a> <span> [<a href="https://arxiv.org/pdf/2403.17808">pdf</a>, <a href="https://arxiv.org/ps/2403.17808">ps</a>, <a href="https://arxiv.org/format/2403.17808">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Annotated Biomedical Video Generation using Denoising Diffusion Probabilistic Models and Flow Fields </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yilmaz%2C+R">R眉veyda Yilmaz</a>, <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.17808v1-abstract-short" style="display: inline;"> The segmentation and tracking of living cells play a vital role within the biomedical domain, particularly in cancer research, drug development, and developmental biology. These are usually tedious and time-consuming tasks that are traditionally done by biomedical experts. Recently, to automatize these processes, deep learning based segmentation and tracking methods have been proposed. These metho… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.17808v1-abstract-full').style.display = 'inline'; document.getElementById('2403.17808v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.17808v1-abstract-full" style="display: none;"> The segmentation and tracking of living cells play a vital role within the biomedical domain, particularly in cancer research, drug development, and developmental biology. These are usually tedious and time-consuming tasks that are traditionally done by biomedical experts. Recently, to automatize these processes, deep learning based segmentation and tracking methods have been proposed. These methods require large-scale datasets and their full potential is constrained by the scarcity of annotated data in the biomedical imaging domain. To address this limitation, we propose Biomedical Video Diffusion Model (BVDM), capable of generating realistic-looking synthetic microscopy videos. Trained only on a single real video, BVDM can generate videos of arbitrary length with pixel-level annotations that can be used for training data-hungry models. It is composed of a denoising diffusion probabilistic model (DDPM) generating high-fidelity synthetic cell microscopy images and a flow prediction model (FPM) predicting the non-rigid transformation between consecutive video frames. During inference, initially, the DDPM imposes realistic cell textures on synthetic cell masks which are generated based on real data statistics. The flow prediction model predicts the flow field between consecutive masks and applies that to the DDPM output from the previous time frame to create the next one while keeping temporal consistency. BVDM outperforms state-of-the-art synthetic live cell microscopy video generation models. Furthermore, we demonstrate that a sufficiently large synthetic dataset enhances the performance of cell segmentation and tracking models compared to using a limited amount of available real data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.17808v1-abstract-full').style.display = 'none'; document.getElementById('2403.17808v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.04884">arXiv:2403.04884</a> <span> [<a href="https://arxiv.org/pdf/2403.04884">pdf</a>, <a href="https://arxiv.org/format/2403.04884">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Optimizing Retinal Prosthetic Stimuli with Conditional Invertible Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yuli Wu</a>, <a href="/search/cs?searchtype=author&query=Wittmann%2C+J">Julian Wittmann</a>, <a href="/search/cs?searchtype=author&query=Walter%2C+P">Peter Walter</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.04884v2-abstract-short" style="display: inline;"> Implantable retinal prostheses offer a promising solution to restore partial vision by circumventing damaged photoreceptor cells in the retina and directly stimulating the remaining functional retinal cells. However, the information transmission between the camera and retinal cells is often limited by the low resolution of the electrode array and the lack of specificity for different ganglion cell… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.04884v2-abstract-full').style.display = 'inline'; document.getElementById('2403.04884v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.04884v2-abstract-full" style="display: none;"> Implantable retinal prostheses offer a promising solution to restore partial vision by circumventing damaged photoreceptor cells in the retina and directly stimulating the remaining functional retinal cells. However, the information transmission between the camera and retinal cells is often limited by the low resolution of the electrode array and the lack of specificity for different ganglion cell types, resulting in suboptimal stimulations. In this work, we propose to utilize normalizing flow-based conditional invertible neural networks to optimize retinal implant stimulation in an unsupervised manner. The invertibility of these networks allows us to use them as a surrogate for the computational model of the visual system, while also encoding input camera signals into optimized electrical stimuli on the electrode array. Compared to other methods, such as trivial downsampling, linear models, and feed-forward convolutional neural networks, the flow-based invertible neural network and its conditional extension yield better visual reconstruction qualities w.r.t. various metrics using a physiologically validated simulation tool. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.04884v2-abstract-full').style.display = 'none'; document.getElementById('2403.04884v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.09049">arXiv:2401.09049</a> <span> [<a href="https://arxiv.org/pdf/2401.09049">pdf</a>, <a href="https://arxiv.org/format/2401.09049">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Enhancing Lidar-based Object Detection in Adverse Weather using Offset Sequences in Time </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=van+Kempen%2C+R">Raphael van Kempen</a>, <a href="/search/cs?searchtype=author&query=Rehbronn%2C+T">Tim Rehbronn</a>, <a href="/search/cs?searchtype=author&query=Jose%2C+A">Abin Jose</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Lampe%2C+B">Bastian Lampe</a>, <a href="/search/cs?searchtype=author&query=Woopen%2C+T">Timo Woopen</a>, <a href="/search/cs?searchtype=author&query=Eckstein%2C+L">Lutz Eckstein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.09049v1-abstract-short" style="display: inline;"> Automated vehicles require an accurate perception of their surroundings for safe and efficient driving. Lidar-based object detection is a widely used method for environment perception, but its performance is significantly affected by adverse weather conditions such as rain and fog. In this work, we investigate various strategies for enhancing the robustness of lidar-based object detection by proce… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.09049v1-abstract-full').style.display = 'inline'; document.getElementById('2401.09049v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.09049v1-abstract-full" style="display: none;"> Automated vehicles require an accurate perception of their surroundings for safe and efficient driving. Lidar-based object detection is a widely used method for environment perception, but its performance is significantly affected by adverse weather conditions such as rain and fog. In this work, we investigate various strategies for enhancing the robustness of lidar-based object detection by processing sequential data samples generated by lidar sensors. Our approaches leverage temporal information to improve a lidar object detection model, without the need for additional filtering or pre-processing steps. We compare $10$ different neural network architectures that process point cloud sequences including a novel augmentation strategy introducing a temporal offset between frames of a sequence during training and evaluate the effectiveness of all strategies on lidar point clouds under adverse weather conditions through experiments. Our research provides a comprehensive study of effective methods for mitigating the effects of adverse weather on the reliability of lidar-based object detection using sequential data that are evaluated using public datasets such as nuScenes, Dense, and the Canadian Adverse Driving Conditions Dataset. Our findings demonstrate that our novel method, involving temporal offset augmentation through randomized frame skipping in sequences, enhances object detection accuracy compared to both the baseline model (Pillar-based Object Detection) and no augmentation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.09049v1-abstract-full').style.display = 'none'; document.getElementById('2401.09049v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published as part of the III. International Conference on Electrical, Computer and Energy Technologies (ICECET 2023), Cape Town, South Africa, November 16-17, 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.05479">arXiv:2311.05479</a> <span> [<a href="https://arxiv.org/pdf/2311.05479">pdf</a>, <a href="https://arxiv.org/format/2311.05479">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Retinal OCT Synthesis with Denoising Diffusion Probabilistic Models for Layer Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yuli Wu</a>, <a href="/search/cs?searchtype=author&query=He%2C+W">Weidong He</a>, <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Dou%2C+N">Ningxin Dou</a>, <a href="/search/cs?searchtype=author&query=Fan%2C+Z">Zixin Fan</a>, <a href="/search/cs?searchtype=author&query=Mi%2C+S">Shengli Mi</a>, <a href="/search/cs?searchtype=author&query=Walter%2C+P">Peter Walter</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.05479v2-abstract-short" style="display: inline;"> Modern biomedical image analysis using deep learning often encounters the challenge of limited annotated data. To overcome this issue, deep generative models can be employed to synthesize realistic biomedical images. In this regard, we propose an image synthesis method that utilizes denoising diffusion probabilistic models (DDPMs) to automatically generate retinal optical coherence tomography (OCT… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.05479v2-abstract-full').style.display = 'inline'; document.getElementById('2311.05479v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.05479v2-abstract-full" style="display: none;"> Modern biomedical image analysis using deep learning often encounters the challenge of limited annotated data. To overcome this issue, deep generative models can be employed to synthesize realistic biomedical images. In this regard, we propose an image synthesis method that utilizes denoising diffusion probabilistic models (DDPMs) to automatically generate retinal optical coherence tomography (OCT) images. By providing rough layer sketches, the trained DDPMs can generate realistic circumpapillary OCT images. We further find that more accurate pseudo labels can be obtained through knowledge adaptation, which greatly benefits the segmentation task. Through this, we observe a consistent improvement in layer segmentation accuracy, which is validated using various neural networks. Furthermore, we have discovered that a layer segmentation model trained solely with synthesized images can achieve comparable results to a model trained exclusively with real images. These findings demonstrate the promising potential of DDPMs in reducing the need for manual annotations of retinal OCT images. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.05479v2-abstract-full').style.display = 'none'; document.getElementById('2311.05479v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ISBI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.04887">arXiv:2309.04887</a> <span> [<a href="https://arxiv.org/pdf/2309.04887">pdf</a>, <a href="https://arxiv.org/format/2309.04887">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SortedAP: Rethinking evaluation metrics for instance segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+L">Long Chen</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yuli Wu</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Merhof%2C+D">Dorit Merhof</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.04887v1-abstract-short" style="display: inline;"> Designing metrics for evaluating instance segmentation revolves around comprehensively considering object detection and segmentation accuracy. However, other important properties, such as sensitivity, continuity, and equality, are overlooked in the current study. In this paper, we reveal that most existing metrics have a limited resolution of segmentation quality. They are only conditionally sensi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.04887v1-abstract-full').style.display = 'inline'; document.getElementById('2309.04887v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.04887v1-abstract-full" style="display: none;"> Designing metrics for evaluating instance segmentation revolves around comprehensively considering object detection and segmentation accuracy. However, other important properties, such as sensitivity, continuity, and equality, are overlooked in the current study. In this paper, we reveal that most existing metrics have a limited resolution of segmentation quality. They are only conditionally sensitive to the change of masks or false predictions. For certain metrics, the score can change drastically in a narrow range which could provide a misleading indication of the quality gap between results. Therefore, we propose a new metric called sortedAP, which strictly decreases with both object- and pixel-level imperfections and has an uninterrupted penalization scale over the entire domain. We provide the evaluation toolkit and experiment code at https://www.github.com/looooongChen/sortedAP. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.04887v1-abstract-full').style.display = 'none'; document.getElementById('2309.04887v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.06965">arXiv:2305.06965</a> <span> [<a href="https://arxiv.org/pdf/2305.06965">pdf</a>, <a href="https://arxiv.org/format/2305.06965">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Transformers for CT Reconstruction From Monoplanar and Biplanar Radiographs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=M%C3%BCller-Franzes%2C+G">Gustav M眉ller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.06965v1-abstract-short" style="display: inline;"> Computed Tomography (CT) scans provide detailed and accurate information of internal structures in the body. They are constructed by sending x-rays through the body from different directions and combining this information into a three-dimensional volume. Such volumes can then be used to diagnose a wide range of conditions and allow for volumetric measurements of organs. In this work, we tackle the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06965v1-abstract-full').style.display = 'inline'; document.getElementById('2305.06965v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.06965v1-abstract-full" style="display: none;"> Computed Tomography (CT) scans provide detailed and accurate information of internal structures in the body. They are constructed by sending x-rays through the body from different directions and combining this information into a three-dimensional volume. Such volumes can then be used to diagnose a wide range of conditions and allow for volumetric measurements of organs. In this work, we tackle the problem of reconstructing CT images from biplanar x-rays only. X-rays are widely available and even if the CT reconstructed from these radiographs is not a replacement of a complete CT in the diagnostic setting, it might serve to spare the patients from radiation where a CT is only acquired for rough measurements such as determining organ size. We propose a novel method based on the transformer architecture, by framing the underlying task as a language translation problem. Radiographs and CT images are first embedded into latent quantized codebook vectors using two different autoencoder networks. We then train a GPT model, to reconstruct the codebook vectors of the CT image, conditioned on the codebook vectors of the x-rays and show that this approach leads to realistic looking images. To encourage further research in this direction, we make our code publicly available on GitHub: XXX. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06965v1-abstract-full').style.display = 'none'; document.getElementById('2305.06965v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.06963">arXiv:2305.06963</a> <span> [<a href="https://arxiv.org/pdf/2305.06963">pdf</a>, <a href="https://arxiv.org/format/2305.06963">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Cascaded Cross-Attention Networks for Data-Efficient Whole-Slide Image Classification Using Transformers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.06963v1-abstract-short" style="display: inline;"> Whole-Slide Imaging allows for the capturing and digitization of high-resolution images of histological specimen. An automated analysis of such images using deep learning models is therefore of high demand. The transformer architecture has been proposed as a possible candidate for effectively leveraging the high-resolution information. Here, the whole-slide image is partitioned into smaller image… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06963v1-abstract-full').style.display = 'inline'; document.getElementById('2305.06963v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.06963v1-abstract-full" style="display: none;"> Whole-Slide Imaging allows for the capturing and digitization of high-resolution images of histological specimen. An automated analysis of such images using deep learning models is therefore of high demand. The transformer architecture has been proposed as a possible candidate for effectively leveraging the high-resolution information. Here, the whole-slide image is partitioned into smaller image patches and feature tokens are extracted from these image patches. However, while the conventional transformer allows for a simultaneous processing of a large set of input tokens, the computational demand scales quadratically with the number of input tokens and thus quadratically with the number of image patches. To address this problem we propose a novel cascaded cross-attention network (CCAN) based on the cross-attention mechanism that scales linearly with the number of extracted patches. Our experiments demonstrate that this architecture is at least on-par with and even outperforms other attention-based state-of-the-art methods on two public datasets: On the use-case of lung cancer (TCGA NSCLC) our model reaches a mean area under the receiver operating characteristic (AUC) of 0.970 $\pm$ 0.008 and on renal cancer (TCGA RCC) reaches a mean AUC of 0.985 $\pm$ 0.004. Furthermore, we show that our proposed model is efficient in low-data regimes, making it a promising approach for analyzing whole-slide images in resource-limited settings. To foster research in this direction, we make our code publicly available on GitHub: XXX. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06963v1-abstract-full').style.display = 'none'; document.getElementById('2305.06963v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.03570">arXiv:2302.03570</a> <span> [<a href="https://arxiv.org/pdf/2302.03570">pdf</a>, <a href="https://arxiv.org/format/2302.03570">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> A Deep Learning-based in silico Framework for Optimization on Retinal Prosthetic Stimulation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wu%2C+Y">Yuli Wu</a>, <a href="/search/cs?searchtype=author&query=Karetic%2C+I">Ivan Karetic</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Walter%2C+P">Peter Walter</a>, <a href="/search/cs?searchtype=author&query=Merhof%2C+D">Dorit Merhof</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.03570v1-abstract-short" style="display: inline;"> We propose a neural network-based framework to optimize the perceptions simulated by the in silico retinal implant model pulse2percept. The overall pipeline consists of a trainable encoder, a pre-trained retinal implant model and a pre-trained evaluator. The encoder is a U-Net, which takes the original image and outputs the stimulus. The pre-trained retinal implant model is also a U-Net, which is… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.03570v1-abstract-full').style.display = 'inline'; document.getElementById('2302.03570v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.03570v1-abstract-full" style="display: none;"> We propose a neural network-based framework to optimize the perceptions simulated by the in silico retinal implant model pulse2percept. The overall pipeline consists of a trainable encoder, a pre-trained retinal implant model and a pre-trained evaluator. The encoder is a U-Net, which takes the original image and outputs the stimulus. The pre-trained retinal implant model is also a U-Net, which is trained to mimic the biomimetic perceptual model implemented in pulse2percept. The evaluator is a shallow VGG classifier, which is trained with original images. Based on 10,000 test images from the MNIST dataset, we show that the convolutional neural network-based encoder performs significantly better than the trivial downsampling approach, yielding a boost in the weighted F1-Score by 36.17% in the pre-trained classifier with 6x10 electrodes. With this fully neural network-based encoder, the quality of the downstream perceptions can be fine-tuned using gradient descent in an end-to-end fashion. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.03570v1-abstract-full').style.display = 'none'; document.getElementById('2302.03570v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.10227">arXiv:2301.10227</a> <span> [<a href="https://arxiv.org/pdf/2301.10227">pdf</a>, <a href="https://arxiv.org/ps/2301.10227">ps</a>, <a href="https://arxiv.org/format/2301.10227">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Denoising Diffusion Probabilistic Models for Generation of Realistic Fully-Annotated Microscopy Image Data Sets </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Yilmaz%2C+R">R眉veyda Yilmaz</a>, <a href="/search/cs?searchtype=author&query=Baumann%2C+M">Matisse Baumann</a>, <a href="/search/cs?searchtype=author&query=Laube%2C+I">Ina Laube</a>, <a href="/search/cs?searchtype=author&query=Roy%2C+R">Rijo Roy</a>, <a href="/search/cs?searchtype=author&query=Jose%2C+A">Abin Jose</a>, <a href="/search/cs?searchtype=author&query=Br%C3%BCckner%2C+D">Daniel Br眉ckner</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.10227v2-abstract-short" style="display: inline;"> Recent advances in computer vision have led to significant progress in the generation of realistic image data, with denoising diffusion probabilistic models proving to be a particularly effective method. In this study, we demonstrate that diffusion models can effectively generate fully-annotated microscopy image data sets through an unsupervised and intuitive approach, using rough sketches of desi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.10227v2-abstract-full').style.display = 'inline'; document.getElementById('2301.10227v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.10227v2-abstract-full" style="display: none;"> Recent advances in computer vision have led to significant progress in the generation of realistic image data, with denoising diffusion probabilistic models proving to be a particularly effective method. In this study, we demonstrate that diffusion models can effectively generate fully-annotated microscopy image data sets through an unsupervised and intuitive approach, using rough sketches of desired structures as the starting point. The proposed pipeline helps to reduce the reliance on manual annotations when training deep learning-based segmentation approaches and enables the segmentation of diverse datasets without the need for human annotations. This approach holds great promise in streamlining the data generation process and enabling a more efficient and scalable training of segmentation models, as we show in the example of different practical experiments involving various organisms and cell types. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.10227v2-abstract-full').style.display = 'none'; document.getElementById('2301.10227v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.09162">arXiv:2212.09162</a> <span> [<a href="https://arxiv.org/pdf/2212.09162">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Medical Diagnosis with Large Scale Multimodal Transformers: Leveraging Diverse Data for More Accurate Diagnosis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Mueller-Franzes%2C+G">Gustav Mueller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Tianci Wang</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Haarburger%2C+C">Christoph Haarburger</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Bressem%2C+K">Keno Bressem</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.09162v2-abstract-short" style="display: inline;"> Multimodal deep learning has been used to predict clinical endpoints and diagnoses from clinical routine data. However, these models suffer from scaling issues: they have to learn pairwise interactions between each piece of information in each data type, thereby escalating model complexity beyond manageable scales. This has so far precluded a widespread use of multimodal deep learning. Here, we pr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.09162v2-abstract-full').style.display = 'inline'; document.getElementById('2212.09162v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.09162v2-abstract-full" style="display: none;"> Multimodal deep learning has been used to predict clinical endpoints and diagnoses from clinical routine data. However, these models suffer from scaling issues: they have to learn pairwise interactions between each piece of information in each data type, thereby escalating model complexity beyond manageable scales. This has so far precluded a widespread use of multimodal deep learning. Here, we present a new technical approach of "learnable synergies", in which the model only selects relevant interactions between data modalities and keeps an "internal memory" of relevant data. Our approach is easily scalable and naturally adapts to multimodal data inputs from clinical routine. We demonstrate this approach on three large multimodal datasets from radiology and ophthalmology and show that it outperforms state-of-the-art models in clinically relevant diagnosis tasks. Our new approach is transferable and will allow the application of multimodal deep learning to a broad set of clinically relevant problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.09162v2-abstract-full').style.display = 'none'; document.getElementById('2212.09162v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.03364">arXiv:2211.03364</a> <span> [<a href="https://arxiv.org/pdf/2211.03364">pdf</a>, <a href="https://arxiv.org/format/2211.03364">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Medical Diffusion: Denoising Diffusion Probabilistic Models for 3D Medical Image Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khader%2C+F">Firas Khader</a>, <a href="/search/cs?searchtype=author&query=Mueller-Franzes%2C+G">Gustav Mueller-Franzes</a>, <a href="/search/cs?searchtype=author&query=Arasteh%2C+S+T">Soroosh Tayebi Arasteh</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Tianyu Han</a>, <a href="/search/cs?searchtype=author&query=Haarburger%2C+C">Christoph Haarburger</a>, <a href="/search/cs?searchtype=author&query=Schulze-Hagen%2C+M">Maximilian Schulze-Hagen</a>, <a href="/search/cs?searchtype=author&query=Schad%2C+P">Philipp Schad</a>, <a href="/search/cs?searchtype=author&query=Engelhardt%2C+S">Sandy Engelhardt</a>, <a href="/search/cs?searchtype=author&query=Baessler%2C+B">Bettina Baessler</a>, <a href="/search/cs?searchtype=author&query=Foersch%2C+S">Sebastian Foersch</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Kuhl%2C+C">Christiane Kuhl</a>, <a href="/search/cs?searchtype=author&query=Nebelung%2C+S">Sven Nebelung</a>, <a href="/search/cs?searchtype=author&query=Kather%2C+J+N">Jakob Nikolas Kather</a>, <a href="/search/cs?searchtype=author&query=Truhn%2C+D">Daniel Truhn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.03364v7-abstract-short" style="display: inline;"> Recent advances in computer vision have shown promising results in image generation. Diffusion probabilistic models in particular have generated realistic images from textual input, as demonstrated by DALL-E 2, Imagen and Stable Diffusion. However, their use in medicine, where image data typically comprises three-dimensional volumes, has not been systematically evaluated. Synthetic images may play… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.03364v7-abstract-full').style.display = 'inline'; document.getElementById('2211.03364v7-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.03364v7-abstract-full" style="display: none;"> Recent advances in computer vision have shown promising results in image generation. Diffusion probabilistic models in particular have generated realistic images from textual input, as demonstrated by DALL-E 2, Imagen and Stable Diffusion. However, their use in medicine, where image data typically comprises three-dimensional volumes, has not been systematically evaluated. Synthetic images may play a crucial role in privacy preserving artificial intelligence and can also be used to augment small datasets. Here we show that diffusion probabilistic models can synthesize high quality medical imaging data, which we show for Magnetic Resonance Images (MRI) and Computed Tomography (CT) images. We provide quantitative measurements of their performance through a reader study with two medical experts who rated the quality of the synthesized images in three categories: Realistic image appearance, anatomical correctness and consistency between slices. Furthermore, we demonstrate that synthetic images can be used in a self-supervised pre-training and improve the performance of breast segmentation models when data is scarce (dice score 0.91 vs. 0.95 without vs. with synthetic data). The code is publicly available on GitHub: https://github.com/FirasGit/medicaldiffusion. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.03364v7-abstract-full').style.display = 'none'; document.getElementById('2211.03364v7-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.02743">arXiv:2108.02743</a> <span> [<a href="https://arxiv.org/pdf/2108.02743">pdf</a>, <a href="https://arxiv.org/format/2108.02743">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Semi- and Self-Supervised Multi-View Fusion of 3D Microscopy Images using Generative Adversarial Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yang%2C+C">Canyu Yang</a>, <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.02743v1-abstract-short" style="display: inline;"> Recent developments in fluorescence microscopy allow capturing high-resolution 3D images over time for living model organisms. To be able to image even large specimens, techniques like multi-view light-sheet imaging record different orientations at each time point that can then be fused into a single high-quality volume. Based on measured point spread functions (PSF), deconvolution and content fus… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.02743v1-abstract-full').style.display = 'inline'; document.getElementById('2108.02743v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.02743v1-abstract-full" style="display: none;"> Recent developments in fluorescence microscopy allow capturing high-resolution 3D images over time for living model organisms. To be able to image even large specimens, techniques like multi-view light-sheet imaging record different orientations at each time point that can then be fused into a single high-quality volume. Based on measured point spread functions (PSF), deconvolution and content fusion are able to largely revert the inevitable degradation occurring during the imaging process. Classical multi-view deconvolution and fusion methods mainly use iterative procedures and content-based averaging. Lately, Convolutional Neural Networks (CNNs) have been deployed to approach 3D single-view deconvolution microscopy, but the multi-view case waits to be studied. We investigated the efficacy of CNN-based multi-view deconvolution and fusion with two synthetic data sets that mimic developing embryos and involve either two or four complementary 3D views. Compared with classical state-of-the-art methods, the proposed semi- and self-supervised models achieve competitive and superior deconvolution and fusion quality in the two-view and quad-view cases, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.02743v1-abstract-full').style.display = 'none'; document.getElementById('2108.02743v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 3 figures, 1 table, accepted for publication at MLMIR 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.10180">arXiv:2107.10180</a> <span> [<a href="https://arxiv.org/pdf/2107.10180">pdf</a>, <a href="https://arxiv.org/ps/2107.10180">ps</a>, <a href="https://arxiv.org/format/2107.10180">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1371/journal.pone.0260509">10.1371/journal.pone.0260509 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> 3D fluorescence microscopy data synthesis for segmentation and benchmarking </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Rethwisch%2C+M">Malte Rethwisch</a>, <a href="/search/cs?searchtype=author&query=Jarchow%2C+M">Mareike Jarchow</a>, <a href="/search/cs?searchtype=author&query=Koppers%2C+S">Simon Koppers</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.10180v1-abstract-short" style="display: inline;"> Automated image processing approaches are indispensable for many biomedical experiments and help to cope with the increasing amount of microscopy image data in a fast and reproducible way. Especially state-of-the-art deep learning-based approaches most often require large amounts of annotated training data to produce accurate and generalist outputs, but they are often compromised by the general la… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.10180v1-abstract-full').style.display = 'inline'; document.getElementById('2107.10180v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.10180v1-abstract-full" style="display: none;"> Automated image processing approaches are indispensable for many biomedical experiments and help to cope with the increasing amount of microscopy image data in a fast and reproducible way. Especially state-of-the-art deep learning-based approaches most often require large amounts of annotated training data to produce accurate and generalist outputs, but they are often compromised by the general lack of those annotated data sets. In this work, we propose how conditional generative adversarial networks can be utilized to generate realistic image data for 3D fluorescence microscopy from annotation masks of 3D cellular structures. In combination with mask simulation approaches, we demonstrate the generation of fully-annotated 3D microscopy data sets that we make publicly available for training or benchmarking. An additional positional conditioning of the cellular structures enables the reconstruction of position-dependent intensity characteristics and allows to generate image data of different quality levels. A patch-wise working principle and a subsequent full-size reassemble strategy is used to generate image data of arbitrary size and different organisms. We present this as a proof-of-concept for the automated generation of fully-annotated training data sets requiring only a minimum of manual interaction to alleviate the need of manual annotations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.10180v1-abstract-full').style.display = 'none'; document.getElementById('2107.10180v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.00794">arXiv:2105.00794</a> <span> [<a href="https://arxiv.org/pdf/2105.00794">pdf</a>, <a href="https://arxiv.org/ps/2105.00794">ps</a>, <a href="https://arxiv.org/format/2105.00794">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Robust 3D Cell Segmentation: Extending the View of Cellpose </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Smith%2C+R+S">Richard S. Smith</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.00794v3-abstract-short" style="display: inline;"> Increasing data set sizes of 3D microscopy imaging experiments demand for an automation of segmentation processes to be able to extract meaningful biomedical information. Due to the shortage of annotated 3D image data that can be used for machine learning-based approaches, 3D segmentation approaches are required to be robust and to generalize well to unseen data. The Cellpose approach proposed by… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.00794v3-abstract-full').style.display = 'inline'; document.getElementById('2105.00794v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.00794v3-abstract-full" style="display: none;"> Increasing data set sizes of 3D microscopy imaging experiments demand for an automation of segmentation processes to be able to extract meaningful biomedical information. Due to the shortage of annotated 3D image data that can be used for machine learning-based approaches, 3D segmentation approaches are required to be robust and to generalize well to unseen data. The Cellpose approach proposed by Stringer et al. proved to be such a generalist approach for cell instance segmentation tasks. In this paper, we extend the Cellpose approach to improve segmentation accuracy on 3D image data and we further show how the formulation of the gradient maps can be simplified while still being robust and reaching similar segmentation accuracy. The code is publicly available and was integrated into two established open-source applications that allow using the 3D extension of Cellpose without any programming knowledge. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.00794v3-abstract-full').style.display = 'none'; document.getElementById('2105.00794v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.12369">arXiv:2010.12369</a> <span> [<a href="https://arxiv.org/pdf/2010.12369">pdf</a>, <a href="https://arxiv.org/format/2010.12369">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Spherical Harmonics for Shape-Constrained 3D Cell Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Rethwisch%2C+M">Malte Rethwisch</a>, <a href="/search/cs?searchtype=author&query=Koppers%2C+S">Simon Koppers</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.12369v1-abstract-short" style="display: inline;"> Recent microscopy imaging techniques allow to precisely analyze cell morphology in 3D image data. To process the vast amount of image data generated by current digitized imaging techniques, automated approaches are demanded more than ever. Segmentation approaches used for morphological analyses, however, are often prone to produce unnaturally shaped predictions, which in conclusion could lead to i… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.12369v1-abstract-full').style.display = 'inline'; document.getElementById('2010.12369v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.12369v1-abstract-full" style="display: none;"> Recent microscopy imaging techniques allow to precisely analyze cell morphology in 3D image data. To process the vast amount of image data generated by current digitized imaging techniques, automated approaches are demanded more than ever. Segmentation approaches used for morphological analyses, however, are often prone to produce unnaturally shaped predictions, which in conclusion could lead to inaccurate experimental outcomes. In order to minimize further manual interaction, shape priors help to constrain the predictions to the set of natural variations. In this paper, we show how spherical harmonics can be used as an alternative way to inherently constrain the predictions of neural networks for the segmentation of cells in 3D microscopy image data. Benefits and limitations of the spherical harmonic representation are analyzed and final results are compared to other state-of-the-art approaches on two different data sets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.12369v1-abstract-full').style.display = 'none'; document.getElementById('2010.12369v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.12011">arXiv:2010.12011</a> <span> [<a href="https://arxiv.org/pdf/2010.12011">pdf</a>, <a href="https://arxiv.org/format/2010.12011">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cell Behavior">q-bio.CB</span> </div> </div> <p class="title is-5 mathjax"> CellCycleGAN: Spatiotemporal Microscopy Image Synthesis of Cell Populations using Statistical Shape Models and Conditional GANs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=B%C3%A4hr%2C+D">Dennis B盲hr</a>, <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Bhattacharyya%2C+A">Anuk Bhattacharyya</a>, <a href="/search/cs?searchtype=author&query=Moreno-Andr%C3%A9s%2C+D">Daniel Moreno-Andr茅s</a>, <a href="/search/cs?searchtype=author&query=Antonin%2C+W">Wolfram Antonin</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.12011v2-abstract-short" style="display: inline;"> Automatic analysis of spatio-temporal microscopy images is inevitable for state-of-the-art research in the life sciences. Recent developments in deep learning provide powerful tools for automatic analyses of such image data, but heavily depend on the amount and quality of provided training data to perform well. To this end, we developed a new method for realistic generation of synthetic 2D+t micro… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.12011v2-abstract-full').style.display = 'inline'; document.getElementById('2010.12011v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.12011v2-abstract-full" style="display: none;"> Automatic analysis of spatio-temporal microscopy images is inevitable for state-of-the-art research in the life sciences. Recent developments in deep learning provide powerful tools for automatic analyses of such image data, but heavily depend on the amount and quality of provided training data to perform well. To this end, we developed a new method for realistic generation of synthetic 2D+t microscopy image data of fluorescently labeled cellular nuclei. The method combines spatiotemporal statistical shape models of different cell cycle stages with a conditional GAN to generate time series of cell populations and provides instance-level control of cell cycle stage and the fluorescence intensity of generated cells. We show the effect of the GAN conditioning and create a set of synthetic images that can be readily used for training and benchmarking of cell segmentation and tracking approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.12011v2-abstract-full').style.display = 'none'; document.getElementById('2010.12011v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.03847">arXiv:2002.03847</a> <span> [<a href="https://arxiv.org/pdf/2002.03847">pdf</a>, <a href="https://arxiv.org/format/2002.03847">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Making Logic Learnable With Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Brudermueller%2C+T">Tobias Brudermueller</a>, <a href="/search/cs?searchtype=author&query=Shung%2C+D+L">Dennis L. Shung</a>, <a href="/search/cs?searchtype=author&query=Stanley%2C+A+J">Adrian J. Stanley</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Krishnaswamy%2C+S">Smita Krishnaswamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.03847v3-abstract-short" style="display: inline;"> While neural networks are good at learning unspecified functions from training samples, they cannot be directly implemented in hardware and are often not interpretable or formally verifiable. On the other hand, logic circuits are implementable, verifiable, and interpretable but are not able to learn from training data in a generalizable way. We propose a novel logic learning pipeline that combines… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.03847v3-abstract-full').style.display = 'inline'; document.getElementById('2002.03847v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.03847v3-abstract-full" style="display: none;"> While neural networks are good at learning unspecified functions from training samples, they cannot be directly implemented in hardware and are often not interpretable or formally verifiable. On the other hand, logic circuits are implementable, verifiable, and interpretable but are not able to learn from training data in a generalizable way. We propose a novel logic learning pipeline that combines the advantages of neural networks and logic circuits. Our pipeline first trains a neural network on a classification task, and then translates this, first to random forests, and then to AND-Inverter logic. We show that our pipeline maintains greater accuracy than naive translations to logic, and minimizes the logic such that it is more interpretable and has decreased hardware cost. We show the utility of our pipeline on a network that is trained on biomedical data. This approach could be applied to patient care to provide risk stratification and guide clinical decision-making. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.03847v3-abstract-full').style.display = 'none'; document.getElementById('2002.03847v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2001.11469">arXiv:2001.11469</a> <span> [<a href="https://arxiv.org/pdf/2001.11469">pdf</a>, <a href="https://arxiv.org/format/2001.11469">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cell Behavior">q-bio.CB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Tissues and Organs">q-bio.TO</span> </div> </div> <p class="title is-5 mathjax"> Semi-Automatic Generation of Tight Binary Masks and Non-Convex Isosurfaces for Quantitative Analysis of 3D Biological Samples </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Bhide%2C+S">Sourabh Bhide</a>, <a href="/search/cs?searchtype=author&query=Mikut%2C+R">Ralf Mikut</a>, <a href="/search/cs?searchtype=author&query=Leptin%2C+M">Maria Leptin</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2001.11469v1-abstract-short" style="display: inline;"> Current in vivo microscopy allows us detailed spatiotemporal imaging (3D+t) of complete organisms and offers insights into their development on the cellular level. Even though the imaging speed and quality is steadily improving, fully-automated segmentation and analysis methods are often not accurate enough. This is particularly true while imaging large samples (100um - 1mm) and deep inside the sp… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.11469v1-abstract-full').style.display = 'inline'; document.getElementById('2001.11469v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2001.11469v1-abstract-full" style="display: none;"> Current in vivo microscopy allows us detailed spatiotemporal imaging (3D+t) of complete organisms and offers insights into their development on the cellular level. Even though the imaging speed and quality is steadily improving, fully-automated segmentation and analysis methods are often not accurate enough. This is particularly true while imaging large samples (100um - 1mm) and deep inside the specimen. Drosophila embryogenesis, widely used as a developmental paradigm, presents an example for such a challenge, especially where cell outlines need to imaged - a general challenge in other systems as well. To deal with the current bottleneck in analyzing quantitatively the 3D+t light-sheet microscopy images of Drosophila embryos, we developed a collection of semi-automatic open-source tools. The presented methods include a semi-automatic masking procedure, automatic projection of non-convex 3D isosurfaces to 2D representations as well as cell segmentation and tracking. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.11469v1-abstract-full').style.display = 'none'; document.getElementById('2001.11469v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 January, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1910.00443">arXiv:1910.00443</a> <span> [<a href="https://arxiv.org/pdf/1910.00443">pdf</a>, <a href="https://arxiv.org/format/1910.00443">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cell Behavior">q-bio.CB</span> </div> </div> <p class="title is-5 mathjax"> Towards Automatic Embryo Staging in 3D+T Microscopy Images using Convolutional Neural Networks and PointNets </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Traub%2C+M">Manuel Traub</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1910.00443v3-abstract-short" style="display: inline;"> Automatic analyses and comparisons of different stages of embryonic development largely depend on a highly accurate spatiotemporal alignment of the investigated data sets. In this contribution, we assess multiple approaches for automatic staging of developing embryos that were imaged with time-resolved 3D light-sheet microscopy. The methods comprise image-based convolutional neural networks as wel… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.00443v3-abstract-full').style.display = 'inline'; document.getElementById('1910.00443v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1910.00443v3-abstract-full" style="display: none;"> Automatic analyses and comparisons of different stages of embryonic development largely depend on a highly accurate spatiotemporal alignment of the investigated data sets. In this contribution, we assess multiple approaches for automatic staging of developing embryos that were imaged with time-resolved 3D light-sheet microscopy. The methods comprise image-based convolutional neural networks as well as an approach based on the PointNet architecture that directly operates on 3D point clouds of detected cell nuclei centroids. The experiments with four wild-type zebrafish embryos render both approaches suitable for automatic staging with average deviations of 21 - 34 minutes. Moreover, a proof-of-concept evaluation based on simulated 3D+t point cloud data sets shows that average deviations of less than 7 minutes are possible. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.00443v3-abstract-full').style.display = 'none'; document.getElementById('1910.00443v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 3 figures, 1 table, accepted paper at the Simulation and Synthesis in Medical Imaging (SASHIMI) Workshop held at MICCAI 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.06890">arXiv:1904.06890</a> <span> [<a href="https://arxiv.org/pdf/1904.06890">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cell Behavior">q-bio.CB</span> </div> </div> <p class="title is-5 mathjax"> Algorithms used for the Cell Segmentation Benchmark Competition at ISBI 2019 by RWTH-GE </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.06890v1-abstract-short" style="display: inline;"> The presented algorithms for segmentation and tracking follow a 3-step approach where we detect, track and finally segment nuclei. In the preprocessing phase, we detect centroids of the cell nuclei using a convolutional neural network (CNN) for the 2D images and a Laplacian-of-Gaussian Scale Space Maximum Projection approach for the 3D data sets. Tracking was performed in a backwards fashion on th… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.06890v1-abstract-full').style.display = 'inline'; document.getElementById('1904.06890v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.06890v1-abstract-full" style="display: none;"> The presented algorithms for segmentation and tracking follow a 3-step approach where we detect, track and finally segment nuclei. In the preprocessing phase, we detect centroids of the cell nuclei using a convolutional neural network (CNN) for the 2D images and a Laplacian-of-Gaussian Scale Space Maximum Projection approach for the 3D data sets. Tracking was performed in a backwards fashion on the predicted seed points, i.e., starting at the last frame and sequentially connecting corresponding objects until the first frame was reached. Correspondences were identified by propagating detections of a frame t to its preceding frame t-1 and by combining redundant detections using a hierarchical clustering approach. The tracked centroids were then used as input to variants of the seeded watershed algorithm to obtain the final segmentation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.06890v1-abstract-full').style.display = 'none'; document.getElementById('1904.06890v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, algorithms used for the Cell Segmentation Benchmark competition at IEEE International Symposium on Biomedical Imaging (ISBI) 2019 in Venice, Italy</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1810.06933">arXiv:1810.06933</a> <span> [<a href="https://arxiv.org/pdf/1810.06933">pdf</a>, <a href="https://arxiv.org/format/1810.06933">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> CNN-based Preprocessing to Optimize Watershed-based Cell Segmentation in 3D Confocal Microscopy Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Eschweiler%2C+D">Dennis Eschweiler</a>, <a href="/search/cs?searchtype=author&query=Spina%2C+T+V">Thiago V. Spina</a>, <a href="/search/cs?searchtype=author&query=Choudhury%2C+R+C">Rohan C. Choudhury</a>, <a href="/search/cs?searchtype=author&query=Meyerowitz%2C+E">Elliot Meyerowitz</a>, <a href="/search/cs?searchtype=author&query=Cunha%2C+A">Alexandre Cunha</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1810.06933v1-abstract-short" style="display: inline;"> The quantitative analysis of cellular membranes helps understanding developmental processes at the cellular level. Particularly 3D microscopic image data offers valuable insights into cell dynamics, but error-free automatic segmentation remains challenging due to the huge amount of data generated and strong variations in image intensities. In this paper, we propose a new 3D segmentation approach w… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.06933v1-abstract-full').style.display = 'inline'; document.getElementById('1810.06933v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1810.06933v1-abstract-full" style="display: none;"> The quantitative analysis of cellular membranes helps understanding developmental processes at the cellular level. Particularly 3D microscopic image data offers valuable insights into cell dynamics, but error-free automatic segmentation remains challenging due to the huge amount of data generated and strong variations in image intensities. In this paper, we propose a new 3D segmentation approach which combines the discriminative power of convolutional neural networks (CNNs) for preprocessing and investigates the performance of three watershed-based postprocessing strategies (WS), which are well suited to segment object shapes, even when supplied with vague seed and boundary constraints. To leverage the full potential of the watershed algorithm, the multi-instance segmentation problem is initially interpreted as three-class semantic segmentation problem, which in turn is well-suited for the application of CNNs. Using manually annotated 3D confocal microscopy images of Arabidopsis thaliana, we show the superior performance of the proposed method compared to the state of the art. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.06933v1-abstract-full').style.display = 'none'; document.getElementById('1810.06933v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 4 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.07073">arXiv:1806.07073</a> <span> [<a href="https://arxiv.org/pdf/1806.07073">pdf</a>, <a href="https://arxiv.org/format/1806.07073">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> Transfer Learning with Human Corneal Tissues: An Analysis of Optimal Cut-Off Layer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Prodanova%2C+N">Nadezhda Prodanova</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Allgeier%2C+S">Stephan Allgeier</a>, <a href="/search/cs?searchtype=author&query=Bohn%2C+S">Sebastian Bohn</a>, <a href="/search/cs?searchtype=author&query=Stachs%2C+O">Oliver Stachs</a>, <a href="/search/cs?searchtype=author&query=K%C3%B6hler%2C+B">Bernd K枚hler</a>, <a href="/search/cs?searchtype=author&query=Mikut%2C+R">Ralf Mikut</a>, <a href="/search/cs?searchtype=author&query=Bartschat%2C+A">Andreas Bartschat</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.07073v2-abstract-short" style="display: inline;"> Transfer learning is a powerful tool to adapt trained neural networks to new tasks. Depending on the similarity of the original task to the new task, the selection of the cut-off layer is critical. For medical applications like tissue classification, the last layers of an object classification network might not be optimal. We found that on real data of human corneal tissues the best feature repres… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.07073v2-abstract-full').style.display = 'inline'; document.getElementById('1806.07073v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1806.07073v2-abstract-full" style="display: none;"> Transfer learning is a powerful tool to adapt trained neural networks to new tasks. Depending on the similarity of the original task to the new task, the selection of the cut-off layer is critical. For medical applications like tissue classification, the last layers of an object classification network might not be optimal. We found that on real data of human corneal tissues the best feature representation can be found in the middle layers of the Inception-v3 and in the rear layers of the VGG-19 architecture. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.07073v2-abstract-full').style.display = 'none'; document.getElementById('1806.07073v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extendend Abstract with 3 pages and 2 figures. Submitted to MIDL Amsterdam, see openreviews.org</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1710.09933">arXiv:1710.09933</a> <span> [<a href="https://arxiv.org/pdf/1710.09933">pdf</a>, <a href="https://arxiv.org/format/1710.09933">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SEGMENT3D: A Web-based Application for Collaborative Segmentation of 3D images used in the Shoot Apical Meristem </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spina%2C+T+V">Thiago V. Spina</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Falc%C3%A3o%2C+A+X">Alexandre X. Falc茫o</a>, <a href="/search/cs?searchtype=author&query=Meyerowitz%2C+E">Elliot Meyerowitz</a>, <a href="/search/cs?searchtype=author&query=Cunha%2C+A">Alexandre Cunha</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1710.09933v1-abstract-short" style="display: inline;"> The quantitative analysis of 3D confocal microscopy images of the shoot apical meristem helps understanding the growth process of some plants. Cell segmentation in these images is crucial for computational plant analysis and many automated methods have been proposed. However, variations in signal intensity across the image mitigate the effectiveness of those approaches with no easy way for user co… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1710.09933v1-abstract-full').style.display = 'inline'; document.getElementById('1710.09933v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1710.09933v1-abstract-full" style="display: none;"> The quantitative analysis of 3D confocal microscopy images of the shoot apical meristem helps understanding the growth process of some plants. Cell segmentation in these images is crucial for computational plant analysis and many automated methods have been proposed. However, variations in signal intensity across the image mitigate the effectiveness of those approaches with no easy way for user correction. We propose a web-based collaborative 3D image segmentation application, SEGMENT3D, to leverage automatic segmentation results. The image is divided into 3D tiles that can be either segmented interactively from scratch or corrected from a pre-existing segmentation. Individual segmentation results per tile are then automatically merged via consensus analysis and then stitched to complete the segmentation for the entire image stack. SEGMENT3D is a comprehensive application that can be applied to other 3D imaging modalities and general objects. It also provides an easy way to create supervised data to advance segmentation using machine learning models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1710.09933v1-abstract-full').style.display = 'none'; document.getElementById('1710.09933v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1710.06608">arXiv:1710.06608</a> <span> [<a href="https://arxiv.org/pdf/1710.06608">pdf</a>, <a href="https://arxiv.org/format/1710.06608">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Cell Segmentation in 3D Confocal Images using Supervoxel Merge-Forests with CNN-based Hypothesis Selection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Spina%2C+T+V">Thiago V. Spina</a>, <a href="/search/cs?searchtype=author&query=Falc%C3%A3o%2C+A+X">Alexandre X. Falc茫o</a>, <a href="/search/cs?searchtype=author&query=Bartschat%2C+A">Andreas Bartschat</a>, <a href="/search/cs?searchtype=author&query=Mikut%2C+R">Ralf Mikut</a>, <a href="/search/cs?searchtype=author&query=Meyerowitz%2C+E">Elliot Meyerowitz</a>, <a href="/search/cs?searchtype=author&query=Cunha%2C+A">Alexandre Cunha</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1710.06608v1-abstract-short" style="display: inline;"> Automated segmentation approaches are crucial to quantitatively analyze large-scale 3D microscopy images. Particularly in deep tissue regions, automatic methods still fail to provide error-free segmentations. To improve the segmentation quality throughout imaged samples, we present a new supervoxel-based 3D segmentation approach that outperforms current methods and reduces the manual correction ef… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1710.06608v1-abstract-full').style.display = 'inline'; document.getElementById('1710.06608v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1710.06608v1-abstract-full" style="display: none;"> Automated segmentation approaches are crucial to quantitatively analyze large-scale 3D microscopy images. Particularly in deep tissue regions, automatic methods still fail to provide error-free segmentations. To improve the segmentation quality throughout imaged samples, we present a new supervoxel-based 3D segmentation approach that outperforms current methods and reduces the manual correction effort. The algorithm consists of gentle preprocessing and a conservative super-voxel generation method followed by supervoxel agglomeration based on local signal properties and a postprocessing step to fix under-segmentation errors using a Convolutional Neural Network. We validate the functionality of the algorithm on manually labeled 3D confocal images of the plant Arabidopis thaliana and compare the results to a state-of-the-art meristem segmentation algorithm. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1710.06608v1-abstract-full').style.display = 'none'; document.getElementById('1710.06608v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 3 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1704.03298">arXiv:1704.03298</a> <span> [<a href="https://arxiv.org/pdf/1704.03298">pdf</a>, <a href="https://arxiv.org/format/1704.03298">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> The MATLAB Toolbox SciXMiner: User's Manual and Programmer's Guide </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mikut%2C+R">Ralf Mikut</a>, <a href="/search/cs?searchtype=author&query=Bartschat%2C+A">Andreas Bartschat</a>, <a href="/search/cs?searchtype=author&query=Doneit%2C+W">Wolfgang Doneit</a>, <a href="/search/cs?searchtype=author&query=Ordiano%2C+J+%C3%81+G">Jorge 脕ngel Gonz谩lez Ordiano</a>, <a href="/search/cs?searchtype=author&query=Schott%2C+B">Benjamin Schott</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Waczowicz%2C+S">Simon Waczowicz</a>, <a href="/search/cs?searchtype=author&query=Reischl%2C+M">Markus Reischl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1704.03298v1-abstract-short" style="display: inline;"> The Matlab toolbox SciXMiner is designed for the visualization and analysis of time series and features with a special focus to classification problems. It was developed at the Institute of Applied Computer Science of the Karlsruhe Institute of Technology (KIT), a member of the Helmholtz Association of German Research Centres in Germany. The aim was to provide an open platform for the development… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1704.03298v1-abstract-full').style.display = 'inline'; document.getElementById('1704.03298v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1704.03298v1-abstract-full" style="display: none;"> The Matlab toolbox SciXMiner is designed for the visualization and analysis of time series and features with a special focus to classification problems. It was developed at the Institute of Applied Computer Science of the Karlsruhe Institute of Technology (KIT), a member of the Helmholtz Association of German Research Centres in Germany. The aim was to provide an open platform for the development and improvement of data mining methods and its applications to various medical and technical problems. SciXMiner bases on Matlab (tested for the version 2017a). Many functions do not require additional standard toolboxes but some parts of Signal, Statistics and Wavelet toolboxes are used for special cases. The decision to a Matlab-based solution was made to use the wide mathematical functionality of this package provided by The Mathworks Inc. SciXMiner is controlled by a graphical user interface (GUI) with menu items and control elements like popup lists, checkboxes and edit elements. This makes it easier to work with SciXMiner for inexperienced users. Furthermore, an automatization and batch standardization of analyzes is possible using macros. The standard Matlab style using the command line is also available. SciXMiner is an open source software. The download page is http://sourceforge.net/projects/SciXMiner. It is licensed under the conditions of the GNU General Public License (GNU-GPL) of The Free Software Foundation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1704.03298v1-abstract-full').style.display = 'none'; document.getElementById('1704.03298v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 April, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1702.05413">arXiv:1702.05413</a> <span> [<a href="https://arxiv.org/pdf/1702.05413">pdf</a>, <a href="https://arxiv.org/format/1702.05413">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> 3D Cell Nuclei Segmentation with Balanced Graph Partitioning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Arz%2C+J">Julian Arz</a>, <a href="/search/cs?searchtype=author&query=Sanders%2C+P">Peter Sanders</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Mikut%2C+R">Ralf Mikut</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1702.05413v1-abstract-short" style="display: inline;"> Cell nuclei segmentation is one of the most important tasks in the analysis of biomedical images. With ever-growing sizes and amounts of three-dimensional images to be processed, there is a need for better and faster segmentation methods. Graph-based image segmentation has seen a rise in popularity in recent years, but is seen as very costly with regard to computational demand. We propose a new se… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1702.05413v1-abstract-full').style.display = 'inline'; document.getElementById('1702.05413v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1702.05413v1-abstract-full" style="display: none;"> Cell nuclei segmentation is one of the most important tasks in the analysis of biomedical images. With ever-growing sizes and amounts of three-dimensional images to be processed, there is a need for better and faster segmentation methods. Graph-based image segmentation has seen a rise in popularity in recent years, but is seen as very costly with regard to computational demand. We propose a new segmentation algorithm which overcomes these limitations. Our method uses recursive balanced graph partitioning to segment foreground components of a fast and efficient binarization. We construct a model for the cell nuclei to guide the partitioning process. Our algorithm is compared to other state-of-the-art segmentation algorithms in an experimental evaluation on two sets of realistically simulated inputs. Our method is faster, has similar or better quality and an acceptable memory overhead. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1702.05413v1-abstract-full').style.display = 'none'; document.getElementById('1702.05413v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 February, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1608.08471">arXiv:1608.08471</a> <span> [<a href="https://arxiv.org/pdf/1608.08471">pdf</a>, <a href="https://arxiv.org/format/1608.08471">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.5445/IR/1000057821">10.5445/IR/1000057821 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> New Methods to Improve Large-Scale Microscopy Image Analysis with Prior Knowledge and Uncertainty </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1608.08471v1-abstract-short" style="display: inline;"> Multidimensional imaging techniques provide powerful ways to examine various kinds of scientific questions. The routinely produced datasets in the terabyte-range, however, can hardly be analyzed manually and require an extensive use of automated image analysis. The present thesis introduces a new concept for the estimation and propagation of uncertainty involved in image analysis operators and new… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1608.08471v1-abstract-full').style.display = 'inline'; document.getElementById('1608.08471v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1608.08471v1-abstract-full" style="display: none;"> Multidimensional imaging techniques provide powerful ways to examine various kinds of scientific questions. The routinely produced datasets in the terabyte-range, however, can hardly be analyzed manually and require an extensive use of automated image analysis. The present thesis introduces a new concept for the estimation and propagation of uncertainty involved in image analysis operators and new segmentation algorithms that are suitable for terabyte-scale analyses of 3D+t microscopy images. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1608.08471v1-abstract-full').style.display = 'none'; document.getElementById('1608.08471v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 August, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">218 pages, 58 figures, PhD thesis, Department of Mechanical Engineering, Karlsruhe Institute of Technology, published online with KITopen (License: CC BY-SA 3.0, http://dx.doi.org/10.5445/IR/1000057821)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1608.01276">arXiv:1608.01276</a> <span> [<a href="https://arxiv.org/pdf/1608.01276">pdf</a>, <a href="https://arxiv.org/format/1608.01276">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1371/journal.pone.0187535">10.1371/journal.pone.0187535 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fuzzy-based Propagation of Prior Knowledge to Improve Large-Scale Image Analysis Pipelines </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Mikut%2C+R">Ralf Mikut</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1608.01276v1-abstract-short" style="display: inline;"> Many automatically analyzable scientific questions are well-posed and offer a variety of information about the expected outcome a priori. Although often being neglected, this prior knowledge can be systematically exploited to make automated analysis operations sensitive to a desired phenomenon or to evaluate extracted content with respect to this prior knowledge. For instance, the performance of p… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1608.01276v1-abstract-full').style.display = 'inline'; document.getElementById('1608.01276v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1608.01276v1-abstract-full" style="display: none;"> Many automatically analyzable scientific questions are well-posed and offer a variety of information about the expected outcome a priori. Although often being neglected, this prior knowledge can be systematically exploited to make automated analysis operations sensitive to a desired phenomenon or to evaluate extracted content with respect to this prior knowledge. For instance, the performance of processing operators can be greatly enhanced by a more focused detection strategy and the direct information about the ambiguity inherent in the extracted data. We present a new concept for the estimation and propagation of uncertainty involved in image analysis operators. This allows using simple processing operators that are suitable for analyzing large-scale 3D+t microscopy images without compromising the result quality. On the foundation of fuzzy set theory, we transform available prior knowledge into a mathematical representation and extensively use it enhance the result quality of various processing operators. All presented concepts are illustrated on a typical bioimage analysis pipeline comprised of seed point detection, segmentation, multiview fusion and tracking. Furthermore, the functionality of the proposed approach is validated on a comprehensive simulated 3D+t benchmark data set that mimics embryonic development and on large-scale light-sheet microscopy data of a zebrafish embryo. The general concept introduced in this contribution represents a new approach to efficiently exploit prior knowledge to improve the result quality of image analysis pipelines. Especially, the automated analysis of terabyte-scale microscopy data will benefit from sophisticated and efficient algorithms that enable a quantitative and fast readout. The generality of the concept, however, makes it also applicable to practically any other field with processing strategies that are arranged as linear pipelines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1608.01276v1-abstract-full').style.display = 'none'; document.getElementById('1608.01276v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 August, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">39 pages, 12 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1604.04906">arXiv:1604.04906</a> <span> [<a href="https://arxiv.org/pdf/1604.04906">pdf</a>, <a href="https://arxiv.org/format/1604.04906">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cell Behavior">q-bio.CB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ISBI.2016.7493359">10.1109/ISBI.2016.7493359 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Generating Semi-Synthetic Validation Benchmarks for Embryomics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Arz%2C+J">Julian Arz</a>, <a href="/search/cs?searchtype=author&query=Schott%2C+B">Benjamin Schott</a>, <a href="/search/cs?searchtype=author&query=Otte%2C+J+C">Jens C. Otte</a>, <a href="/search/cs?searchtype=author&query=Kobitski%2C+A">Andrei Kobitski</a>, <a href="/search/cs?searchtype=author&query=Nienhaus%2C+G+U">G. Ulrich Nienhaus</a>, <a href="/search/cs?searchtype=author&query=Str%C3%A4hle%2C+U">Uwe Str盲hle</a>, <a href="/search/cs?searchtype=author&query=Sanders%2C+P">Peter Sanders</a>, <a href="/search/cs?searchtype=author&query=Mikut%2C+R">Ralf Mikut</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1604.04906v1-abstract-short" style="display: inline;"> Systematic validation is an essential part of algorithm development. The enormous dataset sizes and the complexity observed in many recent time-resolved 3D fluorescence microscopy imaging experiments, however, prohibit a comprehensive manual ground truth generation. Moreover, existing simulated benchmarks in this field are often too simple or too specialized to sufficiently validate the observed i… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1604.04906v1-abstract-full').style.display = 'inline'; document.getElementById('1604.04906v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1604.04906v1-abstract-full" style="display: none;"> Systematic validation is an essential part of algorithm development. The enormous dataset sizes and the complexity observed in many recent time-resolved 3D fluorescence microscopy imaging experiments, however, prohibit a comprehensive manual ground truth generation. Moreover, existing simulated benchmarks in this field are often too simple or too specialized to sufficiently validate the observed image analysis problems. We present a new semi-synthetic approach to generate realistic 3D+t benchmarks that combines challenging cellular movement dynamics of real embryos with simulated fluorescent nuclei and artificial image distortions including various parametrizable options like cell numbers, acquisition deficiencies or multiview simulations. We successfully applied the approach to simulate the development of a zebrafish embryo with thousands of cells over 14 hours of its early existence. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1604.04906v1-abstract-full').style.display = 'none'; document.getElementById('1604.04906v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 April, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted publication at IEEE International Symposium on Biomedical Imaging: From Nano to Macro (ISBI), 2016</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1602.02938">arXiv:1602.02938</a> <span> [<a href="https://arxiv.org/pdf/1602.02938">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Challenges of Integrating A Priori Information Efficiently in the Discovery of Spatio-Temporal Objects in Large Databases </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Schott%2C+B">Benjamin Schott</a>, <a href="/search/cs?searchtype=author&query=Stegmaier%2C+J">Johannes Stegmaier</a>, <a href="/search/cs?searchtype=author&query=Takamiya%2C+M">Masanari Takamiya</a>, <a href="/search/cs?searchtype=author&query=Mikut%2C+R">Ralf Mikut</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1602.02938v1-abstract-short" style="display: inline;"> Using the knowledge discovery framework, it is possible to explore object databases and extract groups of objects with highly heterogeneous movement behavior by efficiently integrating a priori knowledge through interacting with the framework. The whole process is modular expandable and is therefore adaptive to any problem formulation. Further, the flexible use of different information allocation… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1602.02938v1-abstract-full').style.display = 'inline'; document.getElementById('1602.02938v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1602.02938v1-abstract-full" style="display: none;"> Using the knowledge discovery framework, it is possible to explore object databases and extract groups of objects with highly heterogeneous movement behavior by efficiently integrating a priori knowledge through interacting with the framework. The whole process is modular expandable and is therefore adaptive to any problem formulation. Further, the flexible use of different information allocation processes reveal a great potential to efficiently incorporate the a priori knowledge of different users in different ways. Therefore, the stepwise knowledge discovery process embedded in the knowledge discovery framework is described in detail to point out the flexibility of such a system incorporating object databases from different applications. The described framework can be used to gain knowledge out of object databases in many different fields. This knowledge can be used to gain further insights and improve the understanding of underlying phenomena. The functionality of the proposed framework is exemplarily demonstrated using a benchmark database based on real biological object data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1602.02938v1-abstract-full').style.display = 'none'; document.getElementById('1602.02938v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 February, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Proc., 25. Workshop Computational Intelligence, Dortmund, 2015</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 92-08; 92C37; 92C55; 68U10 </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>