Search | arXiv e-print repository
Showing 1–46 of 46 results for author: Ram, K
Searching in archive cs; a search across all archives is also available. Sorted by announcement date (newest first), 50 results per page, abstracts shown.
Search v0.5.6, released 2020-02-24.
1. arXiv:2502.02555 [pdf, other] (eess.IV, cs.CV)
AAD-DCE: An Aggregated Multimodal Attention Mechanism for Early and Late Dynamic Contrast Enhanced Prostate MRI Synthesis
Authors: Divya Bharti, Sriprabha Ramanarayanan, Sadhana S, Kishore Kumar M, Keerthi Ram, Harsh Agarwal, Ramesh Venkatesan, Mohanasankar Sivaprakasam
Abstract: Dynamic Contrast-Enhanced Magnetic Resonance Imaging (DCE-MRI) is a medical imaging technique that plays a crucial role in the detailed visualization and identification of tissue perfusion in abnormal lesions and in radiological suggestions for biopsy. However, DCE-MRI involves the administration of a Gadolinium-based (Gad) contrast agent, which is associated with a risk of toxicity in the body. Previous deep learning approaches that synthesize DCE-MR images employ unimodal non-contrast or low-dose contrast MRI images and lack focus on the local perfusion information within the anatomy of interest. We propose AAD-DCE, a generative adversarial network (GAN) with an aggregated attention discriminator module consisting of global and local discriminators. The discriminators provide a spatially embedded attention map to drive the generator to synthesize early and late response DCE-MRI images. Our method employs multimodal inputs - T2 weighted (T2W), Apparent Diffusion Coefficient (ADC), and T1 pre-contrast - for image synthesis. Extensive comparative and ablation studies on the ProstateX dataset show that our model (i) is agnostic to various generator benchmarks, (ii) outperforms other DCE-MRI synthesis approaches with improvement margins of +0.64 dB PSNR, +0.0518 SSIM, -0.015 MAE for early response and +0.1 dB PSNR, +0.0424 SSIM, -0.021 MAE for late response, and (iii) emphasizes the importance of attention ensembling. Our code is available at https://github.com/bhartidivya/AAD-DCE.
Submitted 5 February, 2025; v1 submitted 4 February, 2025; originally announced February 2025.
Comments: Accepted at ICASSP 2025
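For readers who want a concrete picture of the "aggregated attention discriminator" idea described in this abstract, the sketch below is a minimal, hypothetical illustration (not the authors' released code; see their repository above) of how a global and a local discriminator could be combined into a spatial attention map that then weights a pixel-wise synthesis loss. All class names, tensor shapes, and the aggregation rule are assumptions.

```python
import torch
import torch.nn as nn

class PatchDiscriminator(nn.Module):
    """Small PatchGAN-style discriminator returning a per-region score map."""
    def __init__(self, in_ch: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_ch, 32, 4, stride=2, padding=1), nn.LeakyReLU(0.2),
            nn.Conv2d(32, 64, 4, stride=2, padding=1), nn.LeakyReLU(0.2),
            nn.Conv2d(64, 1, 3, padding=1),
        )

    def forward(self, x):
        return self.net(x)

class AggregatedAttentionDiscriminator(nn.Module):
    """Hypothetical aggregation of a global and a local (ROI) discriminator
    into one spatial attention map, in the spirit of the abstract above."""
    def __init__(self, in_ch: int):
        super().__init__()
        self.global_d = PatchDiscriminator(in_ch)
        self.local_d = PatchDiscriminator(in_ch)

    def forward(self, image, roi):
        g = self.global_d(image)   # global realism score map
        l = self.local_d(roi)      # local realism score map for the cropped ROI
        l_up = nn.functional.interpolate(l, size=g.shape[-2:], mode="bilinear",
                                         align_corners=False)
        attention = torch.sigmoid(g + l_up)  # aggregated spatial attention
        return g, l, attention

def attention_weighted_l1(fake, real, attention):
    """Usage sketch: weight a pixel-wise synthesis loss by the attention map."""
    att = nn.functional.interpolate(attention, size=fake.shape[-2:], mode="bilinear",
                                    align_corners=False)
    return (att * (fake - real).abs()).mean()
```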
2. arXiv:2501.13984 [pdf] (cs.CL, cs.AI, cs.LG) doi:10.1109/CIBCB58642.2024.10702112
Comprehensive Modeling and Question Answering of Cancer Clinical Practice Guidelines using LLMs
Authors: Bhumika Gupta, Pralaypati Ta, Keerthi Ram, Mohanasankar Sivaprakasam
Abstract: The updated recommendations on diagnostic procedures and treatment pathways for a medical condition are documented as graphical flows in Clinical Practice Guidelines (CPGs). For effective use of the CPGs in helping medical professionals in the treatment decision process, it is necessary to fully capture the guideline knowledge, particularly the contexts and their relationships in the graph. While several existing works have utilized these guidelines to create rule bases for Clinical Decision Support Systems, limited work has been done toward directly capturing the full medical knowledge contained in CPGs. This work proposes an approach to create a contextually enriched, faithful digital representation of National Comprehensive Cancer Network (NCCN) Cancer CPGs in the form of graphs using automated extraction and node & relationship classification. We also implement semantic enrichment of the model by using Large Language Models (LLMs) for node classification, achieving an accuracy of 80.86% and 88.47% with zero-shot learning and few-shot learning, respectively. Additionally, we introduce a methodology for answering natural language questions with constraints to guideline text by leveraging LLMs to extract the relevant subgraph from the guideline knowledge base. By generating natural language answers based on subgraph paths and semantic information, we mitigate the risk of incorrect answers and hallucination associated with LLMs, ensuring factual accuracy in medical domain Question Answering.
Submitted 23 January, 2025; originally announced January 2025.
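As a rough illustration of the few-shot node-classification step mentioned above, the sketch below builds a prompt from a handful of labelled guideline nodes and asks a language model to label a new node. The label set, the example nodes, and the `call_llm` callable are placeholders, not the authors' implementation or prompt.

```python
from typing import Callable

# Assumed, illustrative label set for guideline nodes
NODE_LABELS = ["diagnostic procedure", "treatment", "risk stratification", "follow-up"]

# Assumed few-shot examples drawn from already-labelled nodes
FEW_SHOT_EXAMPLES = [
    ("Contrast-enhanced CT of chest and upper abdomen", "diagnostic procedure"),
    ("Lobectomy with mediastinal lymph node dissection", "treatment"),
]

def build_prompt(node_text: str) -> str:
    """Compose a few-shot classification prompt for one guideline node."""
    lines = ["Classify the guideline node into one of: " + ", ".join(NODE_LABELS) + "."]
    for text, label in FEW_SHOT_EXAMPLES:
        lines.append(f"Node: {text}\nLabel: {label}")
    lines.append(f"Node: {node_text}\nLabel:")
    return "\n\n".join(lines)

def classify_node(node_text: str, call_llm: Callable[[str], str]) -> str:
    """`call_llm` stands in for whichever LLM client is available."""
    answer = call_llm(build_prompt(node_text)).strip().lower()
    # Fall back to the first known label that appears in the answer
    return next((lbl for lbl in NODE_LABELS if lbl in answer), "unknown")
```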
3. arXiv:2407.21053 [pdf] (cs.CL, cs.AI)
Knowledge Models for Cancer Clinical Practice Guidelines: Construction, Management and Usage in Question Answering
Authors: Pralaypati Ta, Bhumika Gupta, Arihant Jain, Sneha Sree C, Keerthi Ram, Mohanasankar Sivaprakasam
Abstract: An automated knowledge modeling algorithm for Cancer Clinical Practice Guidelines (CPGs) extracts the knowledge contained in the CPG documents and transforms it into a programmatically interactable, easy-to-update structured model with minimal human intervention. The existing automated algorithms have minimal scope and cannot handle the varying complexity of the knowledge content in the CPGs for different cancer types. This work proposes an improved automated knowledge modeling algorithm to create knowledge models from the National Comprehensive Cancer Network (NCCN) CPGs in Oncology for different cancer types. The proposed algorithm has been evaluated with NCCN CPGs for four different cancer types. We also propose an algorithm to compare the knowledge models for different versions of a guideline to discover the specific changes introduced in the treatment protocol of a new version. We created a question-answering (Q&A) framework with the guideline knowledge models as the augmented knowledge base to study our ability to query the knowledge models. We compiled a set of 32 question-answer pairs derived from two reliable data sources for the treatment of Non-Small Cell Lung Cancer (NSCLC) to evaluate the Q&A framework. The framework was evaluated against the question-answer pairs from one data source, and it can generate the answers with 54.5% accuracy from the treatment algorithm and 81.8% accuracy from the discussion part of the NCCN NSCLC guideline knowledge model.
Submitted 23 July, 2024; originally announced July 2024.
4. arXiv:2407.01578 [pdf] (cs.RO, eess.IV, eess.SY)
A Hybrid-Layered System for Image-Guided Navigation and Robot Assisted Spine Surgeries
Authors: Suhail Ansari T, Vivek Maik, Minhas Naheem, Keerthi Ram, Manojkumar Lakshmanan, Mohanasankar Sivaprakasam
Abstract: In response to the growing demand for precise and affordable solutions for Image-Guided Spine Surgery (IGSS), this paper presents a comprehensive development of a Robot-Assisted and Navigation-Guided IGSS System. The endeavor involves integrating cutting-edge technologies to attain the required surgical precision and limit user radiation exposure, thereby addressing the limitations of manual surgical methods. We propose an IGSS workflow and system architecture employing a hybrid-layered approach, combining modular and integrated system architectures in distinctive layers to develop an affordable system for seamless integration, scalability, and reconfigurability. We developed and integrated the system and extensively tested it on phantoms and cadavers. The proposed system's accuracy on phantoms is 1.02 ± 0.34 mm using navigation guidance and 1.11 ± 0.49 mm using robot assistance. A similar performance was observed in cadaveric validation, where 84% of screw placements were grade A and 10% were grade B using navigation guidance, and 90% were grade A and 10% were grade B using robot assistance as per the Gertzbein-Robbins scale, proving its efficacy for an IGSS. The evaluated performance is adequate for an IGSS and at par with the existing systems in literature and those commercially available. The user radiation is lower than in the literature, given that the system requires only an average of 3 C-Arm images per pedicle screw placement and verification.
Submitted 7 June, 2024; originally announced July 2024.
Comments: arXiv admin note: substantial text overlap with arXiv:2406.04644
5. arXiv:2406.07785 [pdf, other] (cs.CV, cs.LG)
From Variance to Veracity: Unbundling and Mitigating Gradient Variance in Differentiable Bundle Adjustment Layers
Authors: Swaminathan Gurumurthy, Karnik Ram, Bingqing Chen, Zachary Manchester, Zico Kolter
Abstract: Various pose estimation and tracking problems in robotics can be decomposed into a correspondence estimation problem (often computed using a deep network) followed by a weighted least squares optimization problem to solve for the poses. Recent work has shown that coupling the two problems by iteratively refining one conditioned on the other's output yields SOTA results across domains. However, training these models has proved challenging, requiring a litany of tricks to stabilize and speed up training. In this work, we take the visual odometry problem as an example and identify three plausible causes: (1) flow loss interference, (2) linearization errors in the bundle adjustment (BA) layer, and (3) dependence of weight gradients on the BA residual. We show how these issues result in noisy and higher variance gradients, potentially leading to a slow down in training and instabilities. We then propose a simple, yet effective solution to reduce the gradient variance by using the weights predicted by the network in the inner optimization loop to weight the correspondence objective in the training problem. This helps the training objective 'focus' on the more important points, thereby reducing the variance and mitigating the influence of outliers. We show that the resulting method leads to faster training and can be more flexibly trained in varying training setups without sacrificing performance. In particular we show 2-2.5x training speedups over a baseline visual odometry model we modify.
Submitted 11 June, 2024; originally announced June 2024.
Comments: Accepted at CVPR 2024
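To make the variance-reduction idea in this abstract concrete, here is a schematic sketch (assumed, not the paper's code) of a correspondence loss reweighted by the per-point confidence weights the network already predicts for the inner weighted-least-squares pose solve. Tensor shapes and the detach/normalisation choices are illustrative assumptions.

```python
import torch

def weighted_correspondence_loss(pred_flow, gt_flow, conf_weights, eps=1e-6):
    """
    pred_flow, gt_flow: (B, N, 2) predicted / ground-truth correspondences (e.g. flow).
    conf_weights:       (B, N) per-point weights predicted by the network and used
                        inside the weighted least-squares pose solve.
    The same weights reweight the training objective so it 'focuses' on the points
    that actually drive the pose estimate; they are detached so the loss does not
    simply push the weights toward zero.
    """
    w = conf_weights.detach()
    w = w / (w.sum(dim=1, keepdim=True) + eps)      # normalise per sample
    per_point = (pred_flow - gt_flow).norm(dim=-1)  # (B, N) endpoint error
    return (w * per_point).sum(dim=1).mean()

# usage sketch:
# loss = weighted_correspondence_loss(flow, flow_gt, weights_from_ba_layer)
```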
6. arXiv:2406.04644 [pdf] (cs.RO, eess.SY) doi:10.1109/SII58957.2024.10417647
A Hybrid-Layered System for Image-Guided Navigation and Robot Assisted Spine Surgery
Authors: Suhail Ansari T, Vivek Maik, Minhas Naheem, Keerthi Ram, Manojkumar Lakshmanan, Mohanasankar Sivaprakasam
Abstract: In response to the growing demand for precise and affordable solutions for Image-Guided Spine Surgery (IGSS), this paper presents a comprehensive development of a Robot-Assisted and Navigation-Guided IGSS System. The endeavor involves integrating cutting-edge technologies to attain the required surgical precision and limit user radiation exposure, thereby addressing the limitations of manual surgical methods. We propose an IGSS workflow and system architecture employing a hybrid-layered approach, combining modular and integrated system architectures in distinctive layers to develop an affordable system for seamless integration, scalability, and reconfigurability. We developed and integrated the system and extensively tested it on phantoms and cadavers. The proposed system's accuracy on phantoms is 1.020 mm using navigation guidance and 1.11 mm using robot assistance. A similar performance was observed in cadaveric validation, where 84% of screw placements were grade A and 10% were grade B using navigation guidance, and 90% were grade A and 10% were grade B using robot assistance as per the Gertzbein-Robbins scale, proving its efficacy for an IGSS. The evaluated performance is adequate for an IGSS and at par with the existing systems in literature and those commercially available. The user radiation is lower than in the literature, given that the system requires only an average of 3 C-Arm images per pedicle screw placement and verification.
Submitted 7 June, 2024; originally announced June 2024.
Comments: 6 Pages, 4 Figures, Published in IEEE SII Conference
Journal ref: 2024 IEEE/SICE International Symposium on System Integration (SII)
7. arXiv:2404.03556 [pdf, other] (cs.RO)
Robot Safety Monitoring using Programmable Light Curtains
Authors: Karnik Ram, Shobhit Aggarwal, Robert Tamburo, Siddharth Ancha, Srinivasa Narasimhan
Abstract: As factories continue to evolve into collaborative spaces with multiple robots working together with human supervisors in the loop, ensuring safety for all actors involved becomes critical. Currently, laser-based light curtain sensors are widely used in factories for safety monitoring. While these conventional safety sensors meet high accuracy standards, they are difficult to reconfigure and can only monitor a fixed user-defined region of space. Furthermore, they are typically expensive. Instead, we leverage a controllable depth sensor, programmable light curtains (PLC), to develop an inexpensive and flexible real-time safety monitoring system for collaborative robot workspaces. Our system projects virtual dynamic safety envelopes that tightly envelop the moving robot at all times and detect any objects that intrude the envelope. Furthermore, we develop an instrumentation algorithm that optimally places (multiple) PLCs in a workspace to maximize the visibility coverage of robots. Our work enables fence-less human-robot collaboration, while scaling to monitor multiple robots with few sensors. We analyze our system in a real manufacturing testbed with four robot arms and demonstrate its capabilities as a fast, accurate, and inexpensive safety monitoring solution.
Submitted 4 April, 2024; originally announced April 2024.
Comments: Under review for IROS '24. Webpage http://cmu-mfi.github.io/plc-safety
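A toy, purely illustrative version of the "safety envelope intrusion" check this abstract describes: given 3D points measured along a curtain and a set of spheres approximating the robot's current pose, flag points that fall inside an inflated envelope around the robot. The sphere approximation, margin, and decision rule are assumptions, not the system's actual algorithm.

```python
import numpy as np

def intrusion_detected(points, robot_spheres, margin=0.10):
    """
    points:        (N, 3) 3D points measured along the programmed light curtain.
    robot_spheres: list of (center_xyz, radius) spheres approximating the robot links.
    margin:        extra clearance (metres) added around the robot to form the envelope.
    Returns True if any measured point lies in the safety margin between the expected
    robot surface and the inflated envelope, i.e. something has intruded.
    """
    pts = np.asarray(points, dtype=float)
    for center, radius in robot_spheres:
        d = np.linalg.norm(pts - np.asarray(center, dtype=float), axis=1)
        inner, outer = radius, radius + margin
        # points between the robot surface and the inflated envelope are suspicious
        if np.any((d > inner) & (d < outer)):
            return True
    return False
```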
8. arXiv:2402.14172 [pdf, other] (cs.CY, cs.SI)
Open Source Software Field Research: Spanning Social and Practice Networks for Re-Entering the Field
Authors: Sean P. Goggins, Kevin Lumbard, Matt Germonprez, Caifan Du, Karthik Ram, James Howison
Abstract: Sociotechnical research increasingly includes the social sub-networks that emerge from large-scale sociotechnical infrastructure, including the infrastructure for building open source software. This paper addresses these numerous sub-networks as advantageous for researchers. It provides a methodological synthesis focusing on how researchers can best span adjacent social sub-networks during engaged field research. Specifically, we describe practices and artifacts that aid movement from one social subsystem within a more extensive technical infrastructure to another. To surface the importance of spanning sub-networks, we incorporate a discussion of social capital and the role of technical infrastructure in its development for sociotechnical researchers. We then characterize a five-step process for spanning social sub-networks during engaged field research: commitment, context mapping, jargon competence, returning value, and bridging. We then present our experience studying corporate open source software projects and the role of that experience in accelerating our work in open source scientific software research as described through the lens of bridging social capital. Based on our analysis, we offer recommendations for engaging in fieldwork in adjacent social sub-networks that share a technical context and discussion of how the relationship between social and technically acquired social capital is a missing but critical methodological dimension for research on large-scale sociotechnical research.
Submitted 12 March, 2024; v1 submitted 21 February, 2024; originally announced February 2024.
9. arXiv:2308.05068 [pdf, other] (eess.IV, cs.CV)
Geometric Learning-Based Transformer Network for Estimation of Segmentation Errors
Authors: Sneha Sree C, Mohammad Al Fahim, Keerthi Ram, Mohanasankar Sivaprakasam
Abstract: Many segmentation networks have been proposed for 3D volumetric segmentation of tumors and organs at risk. Hospitals and clinical institutions seek to accelerate and minimize the efforts of specialists in image segmentation. Still, in case of errors generated by these networks, clinicians would have to manually edit the generated segmentation maps. Given a 3D volume and its putative segmentation map, we propose an approach to identify and measure erroneous regions in the segmentation map. Our method can estimate error at any point or node in a 3D mesh generated from a possibly erroneous volumetric segmentation map, serving as a Quality Assurance tool. We propose a graph neural network-based transformer based on the Nodeformer architecture to measure and classify the segmentation errors at any point. We have evaluated our network on a high-resolution micro-CT dataset of the human inner-ear bony labyrinth structure by simulating erroneous 3D segmentation maps. Our network incorporates a convolutional encoder to compute node-centric features from the input micro-CT data, the Nodeformer to learn the latent graph embeddings, and a Multi-Layer Perceptron (MLP) to compute and classify the node-wise errors. Our network achieves a mean absolute error of ~0.042 over other Graph Neural Networks (GNN) and an accuracy of 79.53% over other GNNs in estimating and classifying the node-wise errors, respectively. We also put forth vertex-normal prediction as a custom pretext task for pre-training the CNN encoder to improve the network's overall performance. Qualitative analysis shows the efficiency of our network in correctly classifying errors and reducing misclassifications.
Submitted 10 August, 2023; v1 submitted 9 August, 2023; originally announced August 2023.
Comments: Accepted in MICCAI workshop on ShapeMI, 2023
10. arXiv:2308.04821 [pdf, other] (eess.IV, cs.CV)
HyperCoil-Recon: A Hypernetwork-based Adaptive Coil Configuration Task Switching Network for MRI Reconstruction
Authors: Sriprabha Ramanarayanan, Mohammad Al Fahim, Rahul G. S., Amrit Kumar Jethi, Keerthi Ram, Mohanasankar Sivaprakasam
Abstract: Parallel imaging, a fast MRI technique, involves dynamic adjustments based on the configuration, i.e. the number, positioning, and sensitivity of the coils with respect to the anatomy under study. Conventional deep learning-based image reconstruction models have to be trained or fine-tuned for each configuration, posing a barrier to clinical translation, given the lack of computational resources and machine learning expertise for clinicians to train models at deployment. Joint training on diverse datasets learns a single weight set that might underfit to deviated configurations. We propose HyperCoil-Recon, a hypernetwork-based coil configuration task-switching network for multi-coil MRI reconstruction that encodes varying configurations of the numbers of coils in a multi-tasking perspective, posing each configuration as a task. The hypernetworks infer and embed task-specific weights into the reconstruction network, 1) effectively utilizing the contextual knowledge of common and varying image features among the various fields-of-view of the coils, and 2) enabling generality to unseen configurations at test time. Experiments reveal that our approach 1) adapts on the fly to various unseen configurations up to 32 coils when trained on lower numbers (i.e. 7 to 11) of randomly varying coils, and to 120 deviated unseen configurations when trained on 18 configurations in a single model, 2) matches the performance of coil configuration-specific models, and 3) outperforms configuration-invariant models with improvement margins of around 1 dB / 0.03 and 0.3 dB / 0.02 in PSNR / SSIM for knee and brain data. Our code is available at https://github.com/sriprabhar/HyperCoil-Recon
Submitted 9 August, 2023; originally announced August 2023.
Comments: Accepted at the ICCV 2023 Workshop on Computer Vision for Automated Medical Diagnosis (CVAMD), 8 pages, 2 columns
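A compact, assumed sketch of the task-switching idea above: a hypernetwork maps a coil-configuration code to the weights of a convolution inside the reconstruction network, so one model can serve many configurations. The layer structure, shapes, and configuration encoding are illustrative and are not the HyperCoil-Recon implementation (see the repository linked in the abstract).

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class HyperConv(nn.Module):
    """Convolution whose weights are generated by a hypernetwork from a task code."""
    def __init__(self, task_dim: int, in_ch: int, out_ch: int, k: int = 3):
        super().__init__()
        self.in_ch, self.out_ch, self.k = in_ch, out_ch, k
        self.hyper = nn.Sequential(
            nn.Linear(task_dim, 128), nn.ReLU(),
            nn.Linear(128, out_ch * in_ch * k * k + out_ch),
        )

    def forward(self, x, task_code):
        # task_code: (task_dim,) e.g. an embedding of the coil configuration
        params = self.hyper(task_code)
        w_numel = self.out_ch * self.in_ch * self.k * self.k
        weight = params[:w_numel].view(self.out_ch, self.in_ch, self.k, self.k)
        bias = params[w_numel:]
        return F.conv2d(x, weight, bias, padding=self.k // 2)

# usage sketch: the same layer adapts to an unseen coil configuration at test time
# layer = HyperConv(task_dim=16, in_ch=2, out_ch=2)
# y = layer(undersampled_image, coil_config_embedding)
```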
11. arXiv:2308.04262 [pdf, other] (eess.IV, cs.CV, cs.LG)
SDLFormer: A Sparse and Dense Locality-enhanced Transformer for Accelerated MR Image Reconstruction
Authors: Rahul G. S., Sriprabha Ramnarayanan, Mohammad Al Fahim, Keerthi Ram, Preejith S. P, Mohanasankar Sivaprakasam
Abstract: Transformers have emerged as viable alternatives to convolutional neural networks owing to their ability to learn non-local region relationships in the spatial domain. The self-attention mechanism of the transformer enables transformers to capture long-range dependencies in the images, which might be desirable for accelerated MRI image reconstruction as the effect of undersampling is non-local in the image domain. Despite their computational efficiency, window-based transformers suffer from restricted receptive fields as the dependencies are limited to within the scope of the image windows. We propose a window-based transformer network that integrates a dilated attention mechanism and convolution for accelerated MRI image reconstruction. The proposed network consists of dilated and dense neighborhood attention transformers to enhance the distant neighborhood pixel relationship and introduces depth-wise convolutions within the transformer module to learn low-level translation invariant features for accelerated MRI image reconstruction. The proposed model is trained in a self-supervised manner. We perform extensive experiments for multi-coil MRI acceleration for coronal PD, coronal PDFS and axial T2 contrasts with 4x and 5x under-sampling in self-supervised learning based on k-space splitting. We compare our method against other reconstruction architectures and the parallel domain self-supervised learning baseline. Results show that the proposed model exhibits improvement margins of (i) around 1.40 dB in PSNR and around 0.028 in SSIM on average over other architectures and (ii) around 1.44 dB in PSNR and around 0.029 in SSIM over parallel domain self-supervised learning. The code is available at https://github.com/rahul-gs-16/sdlformer.git
Submitted 8 August, 2023; originally announced August 2023.
Comments: Accepted at MICCAI workshop MILLanD 2023 (Medical Image Learning with Noisy and Limited Data)
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at MICCAI workshop MILLanD 2023 Medical Image Learning with noisy and Limited Data</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.10231">arXiv:2307.10231</a> <span> [<a href="https://arxiv.org/pdf/2307.10231">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Automated Knowledge Modeling for Cancer Clinical Practice Guidelines </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ta%2C+P">Pralaypati Ta</a>, <a href="/search/cs?searchtype=author&query=Gupta%2C+B">Bhumika Gupta</a>, <a href="/search/cs?searchtype=author&query=Jain%2C+A">Arihant Jain</a>, <a href="/search/cs?searchtype=author&query=C%2C+S+S">Sneha Sree C</a>, <a href="/search/cs?searchtype=author&query=Sarkar%2C+A">Arunima Sarkar</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.10231v1-abstract-short" style="display: inline;"> Clinical Practice Guidelines (CPGs) for cancer diseases evolve rapidly due to new evidence generated by active research. Currently, CPGs are primarily published in a document format that is ill-suited for managing this developing knowledge. A knowledge model of the guidelines document suitable for programmatic interaction is required. This work proposes an automated method for extraction of knowle… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.10231v1-abstract-full').style.display = 'inline'; document.getElementById('2307.10231v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.10231v1-abstract-full" style="display: none;"> Clinical Practice Guidelines (CPGs) for cancer diseases evolve rapidly due to new evidence generated by active research. Currently, CPGs are primarily published in a document format that is ill-suited for managing this developing knowledge. A knowledge model of the guidelines document suitable for programmatic interaction is required. This work proposes an automated method for extraction of knowledge from National Comprehensive Cancer Network (NCCN) CPGs in Oncology and generating a structured model containing the retrieved knowledge. The proposed method was tested using two versions of NCCN Non-Small Cell Lung Cancer (NSCLC) CPG to demonstrate the effectiveness in faithful extraction and modeling of knowledge. Three enrichment strategies using Cancer staging information, Unified Medical Language System (UMLS) Metathesaurus & National Cancer Institute thesaurus (NCIt) concepts, and Node classification are also presented to enhance the model towards enabling programmatic traversal and querying of cancer care guidelines. 
The Node classification was performed using a Support Vector Machine (SVM) model, achieving a classification accuracy of 0.81 with 10-fold cross-validation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.10231v1-abstract-full').style.display = 'none'; document.getElementById('2307.10231v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.06771">arXiv:2307.06771</a> <span> [<a href="https://arxiv.org/pdf/2307.06771">pdf</a>, <a href="https://arxiv.org/format/2307.06771">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Generalizing Supervised Deep Learning MRI Reconstruction to Multiple and Unseen Contrasts using Meta-Learning Hypernetworks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ramanarayanan%2C+S">Sriprabha Ramanarayanan</a>, <a href="/search/cs?searchtype=author&query=Palla%2C+A">Arun Palla</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.06771v1-abstract-short" style="display: inline;"> Meta-learning has recently been an emerging data-efficient learning technique for various medical imaging operations and has helped advance contemporary deep learning models. Furthermore, meta-learning enhances the knowledge generalization of the imaging tasks by learning both shared and discriminative weights for various configurations of imaging tasks. However, existing meta-learning models atte… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.06771v1-abstract-full').style.display = 'inline'; document.getElementById('2307.06771v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.06771v1-abstract-full" style="display: none;"> Meta-learning has recently been an emerging data-efficient learning technique for various medical imaging operations and has helped advance contemporary deep learning models. Furthermore, meta-learning enhances the knowledge generalization of the imaging tasks by learning both shared and discriminative weights for various configurations of imaging tasks. However, existing meta-learning models attempt to learn a single set of weight initializations of a neural network that might be restrictive for multimodal data. This work aims to develop a multimodal meta-learning model for image reconstruction, which augments meta-learning with evolutionary capabilities to encompass diverse acquisition settings of multimodal data. 
Our proposed model called KM-MAML (Kernel Modulation-based Multimodal Meta-Learning), has hypernetworks that evolve to generate mode-specific weights. These weights provide the mode-specific inductive bias for multiple modes by re-calibrating each kernel of the base network for image reconstruction via a low-rank kernel modulation operation. We incorporate gradient-based meta-learning (GBML) in the contextual space to update the weights of the hypernetworks for different modes. The hypernetworks and the reconstruction network in the GBML setting provide discriminative mode-specific features and low-level image features, respectively. Experiments on multi-contrast MRI reconstruction show that our model, (i) exhibits superior reconstruction performance over joint training, other meta-learning methods, and context-specific MRI reconstruction methods, and (ii) better adaptation capabilities with improvement margins of 0.5 dB in PSNR and 0.01 in SSIM. Besides, a representation analysis with U-Net shows that kernel modulation infuses 80% of mode-specific representation changes in the high-resolution layers. Our source code is available at https://github.com/sriprabhar/KM-MAML/. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.06771v1-abstract-full').style.display = 'none'; document.getElementById('2307.06771v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication in Elsevier Applied Soft Computing Journal, 36 pages, 18 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.06378">arXiv:2304.06378</a> <span> [<a href="https://arxiv.org/pdf/2304.06378">pdf</a>, <a href="https://arxiv.org/format/2304.06378">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Generalizable Deep Learning Method for Suppressing Unseen and Multiple MRI Artifacts Using Meta-learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Palla%2C+A">Arun Palla</a>, <a href="/search/cs?searchtype=author&query=Ramanarayanan%2C+S">Sriprabha Ramanarayanan</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.06378v1-abstract-short" style="display: inline;"> Magnetic Resonance (MR) images suffer from various types of artifacts due to motion, spatial resolution, and under-sampling. 
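<p class="is-size-7">The low-rank kernel modulation described in the KM-MAML abstract above can be sketched as follows. This is only an illustration of the idea, not the authors' code; the <code>ModulatedConv2d</code> layer, its context vector, and all dimensions are assumptions.</p>
<pre><code>
# Illustrative sketch of rank-1 kernel modulation: a tiny "hypernetwork"
# maps a context embedding to two factors that re-calibrate each conv kernel.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ModulatedConv2d(nn.Module):
    def __init__(self, in_ch, out_ch, k=3, context_dim=8):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_ch, in_ch, k, k) * 0.05)
        self.to_u = nn.Linear(context_dim, out_ch)   # low-rank factor over output channels
        self.to_v = nn.Linear(context_dim, in_ch)    # low-rank factor over input channels

    def forward(self, x, context):
        u = self.to_u(context)                       # (out_ch,)
        v = self.to_v(context)                       # (in_ch,)
        # Rank-1 modulation map, broadcast over the spatial kernel dimensions.
        m = torch.sigmoid(torch.outer(u, v))[:, :, None, None]
        return F.conv2d(x, self.weight * m, padding=self.weight.shape[-1] // 2)

x = torch.randn(1, 4, 32, 32)
ctx = torch.randn(8)                                 # placeholder for a mode/context embedding
y = ModulatedConv2d(4, 16)(x, ctx)                   # (1, 16, 32, 32)
</code></pre>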
Conventional deep learning methods deal with removing a specific type of artifact, leading to separately trained models for each artifact type that lack the shared knowledge generalizable across artifacts. Moreover, training a model for each type and amount of artifact is a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.06378v1-abstract-full').style.display = 'inline'; document.getElementById('2304.06378v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.06378v1-abstract-full" style="display: none;"> Magnetic Resonance (MR) images suffer from various types of artifacts due to motion, spatial resolution, and under-sampling. Conventional deep learning methods deal with removing a specific type of artifact, leading to separately trained models for each artifact type that lack the shared knowledge generalizable across artifacts. Moreover, training a model for each type and amount of artifact is a tedious process that consumes more training time and storage of models. On the other hand, the shared knowledge learned by jointly training the model on multiple artifacts might be inadequate to generalize under deviations in the types and amounts of artifacts. Model-agnostic meta-learning (MAML), a nested bi-level optimization framework is a promising technique to learn common knowledge across artifacts in the outer level of optimization, and artifact-specific restoration in the inner level. We propose curriculum-MAML (CMAML), a learning process that integrates MAML with curriculum learning to impart the knowledge of variable artifact complexity to adaptively learn restoration of multiple artifacts during training. Comparative studies against Stochastic Gradient Descent and MAML, using two cardiac datasets reveal that CMAML exhibits (i) better generalization with improved PSNR for 83% of unseen types and amounts of artifacts and improved SSIM in all cases, and (ii) better artifact suppression in 4 out of 5 cases of composite artifacts (scans with multiple artifacts). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.06378v1-abstract-full').style.display = 'none'; document.getElementById('2304.06378v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. 
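<p class="is-size-7">A minimal first-order MAML-style meta-update with a curriculum over artifact difficulty is sketched below, in the spirit of the CMAML abstract above; it is not the authors' training loop, and the task format and hyperparameters are assumptions.</p>
<pre><code>
# Illustrative first-order MAML step over a batch of artifact-removal tasks.
import copy
import torch
import torch.nn as nn

def fomaml_step(model, meta_opt, tasks, inner_lr=1e-2, inner_steps=1):
    """tasks: list of ((x_support, y_support), (x_query, y_query)) tensor pairs,
    ordered easy-to-hard by a curriculum outside this function."""
    loss_fn = nn.MSELoss()
    meta_opt.zero_grad()
    for (xs, ys), (xq, yq) in tasks:
        learner = copy.deepcopy(model)                    # task-specific fast weights
        inner_opt = torch.optim.SGD(learner.parameters(), lr=inner_lr)
        for _ in range(inner_steps):                      # inner-loop adaptation on the support set
            inner_opt.zero_grad()
            loss_fn(learner(xs), ys).backward()
            inner_opt.step()
        query_loss = loss_fn(learner(xq), yq)             # evaluate the adapted weights
        grads = torch.autograd.grad(query_loss, learner.parameters())
        for p, g in zip(model.parameters(), grads):       # first-order approximation: apply
            p.grad = g if p.grad is None else p.grad + g  # query gradients to the meta-parameters
    meta_opt.step()
</code></pre>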
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 6 figures, Accepted in EMBC 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.05057">arXiv:2304.05057</a> <span> [<a href="https://arxiv.org/pdf/2304.05057">pdf</a>, <a href="https://arxiv.org/format/2304.05057">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SFT-KD-Recon: Learning a Student-friendly Teacher for Knowledge Distillation in Magnetic Resonance Image Reconstruction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gayathri%2C+M+N">Matcha Naga Gayathri</a>, <a href="/search/cs?searchtype=author&query=Ramanarayanan%2C+S">Sriprabha Ramanarayanan</a>, <a href="/search/cs?searchtype=author&query=Fahim%2C+M+A">Mohammad Al Fahim</a>, <a href="/search/cs?searchtype=author&query=S%2C+R+G">Rahul G S</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.05057v1-abstract-short" style="display: inline;"> Deep cascaded architectures for magnetic resonance imaging (MRI) acceleration have shown remarkable success in providing high-quality reconstruction. However, as the number of cascades increases, the improvements in reconstruction tend to become marginal, indicating possible excess model capacity. Knowledge distillation (KD) is an emerging technique to compress these models, in which a trained dee… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.05057v1-abstract-full').style.display = 'inline'; document.getElementById('2304.05057v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.05057v1-abstract-full" style="display: none;"> Deep cascaded architectures for magnetic resonance imaging (MRI) acceleration have shown remarkable success in providing high-quality reconstruction. However, as the number of cascades increases, the improvements in reconstruction tend to become marginal, indicating possible excess model capacity. Knowledge distillation (KD) is an emerging technique to compress these models, in which a trained deep teacher network is used to distill knowledge to a smaller student network such that the student learns to mimic the behavior of the teacher. Most KD methods focus on effectively training the student with a pre-trained teacher unaware of the student model. We propose SFT-KD-Recon, a student-friendly teacher training approach along with the student as a prior step to KD to make the teacher aware of the structure and capacity of the student and enable aligning the representations of the teacher with the student. 
In SFT, the teacher is jointly trained with the unfolded branch configurations of the student blocks using three loss terms - teacher-reconstruction loss, student-reconstruction loss, and teacher-student imitation loss, followed by KD of the student. We perform extensive experiments for MRI acceleration in 4x and 5x under-sampling on the brain and cardiac datasets on five KD methods using the proposed approach as a prior step. We consider the DC-CNN architecture and setup teacher as D5C5 (141765 parameters), and student as D3C5 (49285 parameters), denoting a compression of 2.87:1. Results show that (i) our approach consistently improves the KD methods with improved reconstruction performance and image quality, and (ii) the student distilled using our approach is competitive with the teacher, with the performance gap reduced from 0.53 dB to 0.03 dB. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.05057v1-abstract-full').style.display = 'none'; document.getElementById('2304.05057v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 8 figures. Accepted for publication at MIDL 2023. Code for our proposed method is available at https://github.com/GayathriMatcha/SFT-KD-Recon</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.15527">arXiv:2211.15527</a> <span> [<a href="https://arxiv.org/pdf/2211.15527">pdf</a>, <a href="https://arxiv.org/format/2211.15527">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Study of Representational Properties of Unsupervised Anomaly Detection in Brain MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Das%2C+A">Ayantika Das</a>, <a href="/search/cs?searchtype=author&query=Palla%2C+A">Arun Palla</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.15527v1-abstract-short" style="display: inline;"> Anomaly detection in MRI is of high clinical value in imaging and diagnosis. Unsupervised methods for anomaly detection provide interesting formulations based on reconstruction or latent embedding, offering a way to observe properties related to factorization. 
We study four existing modeling methods, and report our empirical observations using simple data science tools, to seek outcomes from the p… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.15527v1-abstract-full').style.display = 'inline'; document.getElementById('2211.15527v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.15527v1-abstract-full" style="display: none;"> Anomaly detection in MRI is of high clinical value in imaging and diagnosis. Unsupervised methods for anomaly detection provide interesting formulations based on reconstruction or latent embedding, offering a way to observe properties related to factorization. We study four existing modeling methods, and report our empirical observations using simple data science tools, to seek outcomes from the perspective of factorization as it would be most relevant to the task of unsupervised anomaly detection, considering the case of brain structural MRI. Our study indicates that anomaly detection algorithms that exhibit factorization related properties are well capacitated with delineatory capabilities to distinguish between normal and anomaly data. We have validated our observations in multiple anomaly and normal datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.15527v1-abstract-full').style.display = 'none'; document.getElementById('2211.15527v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at MICCAI Medical Applications with Disentanglements (MAD) Workshop 2022 https://mad.ikim.nrw/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.07635">arXiv:2211.07635</a> <span> [<a href="https://arxiv.org/pdf/2211.07635">pdf</a>, <a href="https://arxiv.org/format/2211.07635">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Learnable Spatio-Temporal Map Embeddings for Deep Inertial Localization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Melamed%2C+D">Dennis Melamed</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Karnik Ram</a>, <a href="/search/cs?searchtype=author&query=Roy%2C+V">Vivek Roy</a>, <a href="/search/cs?searchtype=author&query=Kitani%2C+K">Kris Kitani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.07635v1-abstract-short" style="display: inline;"> Indoor localization systems often fuse inertial odometry with map information via hand-defined methods to reduce odometry drift, but such methods are sensitive to noise and struggle to generalize across odometry sources. 
To address the robustness problem in map utilization, we propose a data-driven prior on possible user locations in a map by combining learned spatial map embeddings and temporal o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.07635v1-abstract-full').style.display = 'inline'; document.getElementById('2211.07635v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.07635v1-abstract-full" style="display: none;"> Indoor localization systems often fuse inertial odometry with map information via hand-defined methods to reduce odometry drift, but such methods are sensitive to noise and struggle to generalize across odometry sources. To address the robustness problem in map utilization, we propose a data-driven prior on possible user locations in a map by combining learned spatial map embeddings and temporal odometry embeddings. Our prior learns to encode which map regions are feasible locations for a user more accurately than previous hand-defined methods. This prior leads to a 49% improvement in inertial-only localization accuracy when used in a particle filter. This result is significant, as it shows that our relative positioning method can match the performance of absolute positioning using bluetooth beacons. To show the generalizability of our method, we also show similar improvements using wheel encoder odometry. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.07635v1-abstract-full').style.display = 'none'; document.getElementById('2211.07635v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. 
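<p class="is-size-7">A minimal sketch of how a learned location prior can re-weight particles in a particle filter, as described in the abstract above; the <code>prior_prob</code> interface is an assumption, not the authors' API.</p>
<pre><code>
# Illustrative particle filter update using a learned map prior as the measurement model.
import numpy as np

def pf_update(particles, weights, odometry_delta, prior_prob, motion_noise=0.05):
    """particles: (N, 2) positions; weights: (N,) normalized weights.
    prior_prob(xy) returns, for an (N, 2) array, the learned probability that
    each position is a feasible user location given the map and recent odometry."""
    rng = np.random.default_rng()
    # Propagate particles with noisy odometry (motion model).
    particles = particles + odometry_delta + rng.normal(0, motion_noise, particles.shape)
    # Re-weight by the learned spatio-temporal map prior.
    weights = weights * prior_prob(particles)
    weights = weights / weights.sum()
    # Resample only when the effective sample size collapses below half the particle count.
    n_eff = 1.0 / np.sum(weights ** 2)
    if n_eff * 2 > len(particles):
        return particles, weights
    idx = rng.choice(len(particles), size=len(particles), p=weights)
    return particles[idx], np.full(len(particles), 1.0 / len(particles))
</code></pre>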
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project page at https://klabcmu.github.io/learned-map-prior/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.11886">arXiv:2207.11886</a> <span> [<a href="https://arxiv.org/pdf/2207.11886">pdf</a>, <a href="https://arxiv.org/format/2207.11886">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Deep learning based non-contact physiological monitoring in Neonatal Intensive Care Unit </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sahoo%2C+N+N">Nicky Nirlipta Sahoo</a>, <a href="/search/cs?searchtype=author&query=Murugesan%2C+B">Balamurali Murugesan</a>, <a href="/search/cs?searchtype=author&query=Das%2C+A">Ayantika Das</a>, <a href="/search/cs?searchtype=author&query=Karthik%2C+S">Srinivasa Karthik</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Leonhardt%2C+S">Steffen Leonhardt</a>, <a href="/search/cs?searchtype=author&query=Joseph%2C+J">Jayaraj Joseph</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.11886v1-abstract-short" style="display: inline;"> Preterm babies in the Neonatal Intensive Care Unit (NICU) have to undergo continuous monitoring of their cardiac health. Conventional monitoring approaches are contact-based, making the neonates prone to various nosocomial infections. Video-based monitoring approaches have opened up potential avenues for contactless measurement. This work presents a pipeline for remote estimation of cardiopulmonar… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.11886v1-abstract-full').style.display = 'inline'; document.getElementById('2207.11886v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.11886v1-abstract-full" style="display: none;"> Preterm babies in the Neonatal Intensive Care Unit (NICU) have to undergo continuous monitoring of their cardiac health. Conventional monitoring approaches are contact-based, making the neonates prone to various nosocomial infections. Video-based monitoring approaches have opened up potential avenues for contactless measurement. This work presents a pipeline for remote estimation of cardiopulmonary signals from videos in NICU setup. We have proposed an end-to-end deep learning (DL) model that integrates a non-learning based approach to generate surrogate ground truth (SGT) labels for supervision, thus refraining from direct dependency on true ground truth labels. 
We have performed an extended qualitative and quantitative analysis to examine the efficacy of our proposed DL-based pipeline and achieved an overall average mean absolute error of 4.6 beats per minute (bpm) and root mean square error of 6.2 bpm in the estimated heart rate. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.11886v1-abstract-full').style.display = 'none'; document.getElementById('2207.11886v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.01791">arXiv:2207.01791</a> <span> [<a href="https://arxiv.org/pdf/2207.01791">pdf</a>, <a href="https://arxiv.org/format/2207.01791">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.compmedimag.2021.101942">10.1016/j.compmedimag.2021.101942 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A deep cascade of ensemble of dual domain networks with gradient-based T1 assistance and perceptual refinement for fast MRI reconstruction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Murugesan%2C+B">Balamurali Murugesan</a>, <a href="/search/cs?searchtype=author&query=Ramanarayanan%2C+S">Sriprabha Ramanarayanan</a>, <a href="/search/cs?searchtype=author&query=Vijayarangan%2C+S">Sricharan Vijayarangan</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Jagannathan%2C+N+R">Naranamangalam R Jagannathan</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.01791v1-abstract-short" style="display: inline;"> Deep learning networks have shown promising results in fast magnetic resonance imaging (MRI) reconstruction. In our work, we develop deep networks to further improve the quantitative and the perceptual quality of reconstruction. To begin with, we propose reconsynergynet (RSN), a network that combines the complementary benefits of independently operating on both the image and the Fourier domain. Fo… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.01791v1-abstract-full').style.display = 'inline'; document.getElementById('2207.01791v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.01791v1-abstract-full" style="display: none;"> Deep learning networks have shown promising results in fast magnetic resonance imaging (MRI) reconstruction. 
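<p class="is-size-7">As a generic illustration of the heart-rate estimate and error metrics quoted in the NICU monitoring abstract above (not the paper's pipeline), a dominant-frequency estimate in beats per minute and the MAE/RMSE metrics can be computed as follows.</p>
<pre><code>
# Generic sketch: heart rate from a video-derived pulse waveform via the
# dominant spectral peak, plus the MAE and RMSE error metrics.
import numpy as np

def heart_rate_bpm(signal, fs, lo=0.7, hi=4.0):
    """signal: 1-D pulse waveform; fs: sampling rate in Hz.
    Searches the 0.7-4 Hz band (42-240 bpm), a common physiological range."""
    signal = signal - signal.mean()
    freqs = np.fft.rfftfreq(len(signal), d=1.0 / fs)
    power = np.abs(np.fft.rfft(signal)) ** 2
    band = np.logical_and(freqs >= lo, hi >= freqs)
    return 60.0 * freqs[band][np.argmax(power[band])]

def mae(pred, ref):
    return float(np.mean(np.abs(np.asarray(pred) - np.asarray(ref))))

def rmse(pred, ref):
    return float(np.sqrt(np.mean((np.asarray(pred) - np.asarray(ref)) ** 2)))
</code></pre>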
In our work, we develop deep networks to further improve the quantitative and the perceptual quality of reconstruction. To begin with, we propose reconsynergynet (RSN), a network that combines the complementary benefits of independently operating on both the image and the Fourier domain. For a single-coil acquisition, we introduce deep cascade RSN (DC-RSN), a cascade of RSN blocks interleaved with data fidelity (DF) units. Secondly, we improve the structure recovery of DC-RSN for T2 weighted Imaging (T2WI) through assistance of T1 weighted imaging (T1WI), a sequence with short acquisition time. T1 assistance is provided to DC-RSN through a gradient of log feature (GOLF) fusion. Furthermore, we propose perceptual refinement network (PRN) to refine the reconstructions for better visual information fidelity (VIF), a metric highly correlated to radiologists opinion on the image quality. Lastly, for multi-coil acquisition, we propose variable splitting RSN (VS-RSN), a deep cascade of blocks, each block containing RSN, multi-coil DF unit, and a weighted average module. We extensively validate our models DC-RSN and VS-RSN for single-coil and multi-coil acquisitions and report the state-of-the-art performance. We obtain a SSIM of 0.768, 0.923, 0.878 for knee single-coil-4x, multi-coil-4x, and multi-coil-8x in fastMRI. We also conduct experiments to demonstrate the efficacy of GOLF based T1 assistance and PRN. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.01791v1-abstract-full').style.display = 'none'; document.getElementById('2207.01791v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. 
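<p class="is-size-7">The data fidelity (DF) units interleaved with the reconstruction blocks in the abstract above enforce consistency with the acquired k-space. A generic hard data-consistency step (an illustrative sketch, not the authors' implementation) looks like this:</p>
<pre><code>
# Illustrative hard data-consistency step for cascaded MRI reconstruction networks.
import numpy as np

def data_consistency(image_pred, kspace_meas, mask):
    """image_pred  : complex ndarray (H, W), current network estimate
    kspace_meas : complex ndarray (H, W), acquired (undersampled) k-space
    mask        : boolean ndarray (H, W), True at acquired locations"""
    k_pred = np.fft.fft2(image_pred, norm="ortho")
    k_dc = np.where(mask, kspace_meas, k_pred)   # keep measured samples, trust the network elsewhere
    return np.fft.ifft2(k_dc, norm="ortho")
</code></pre>
<p class="is-size-7">A soft variant blends predicted and measured k-space at sampled locations according to an estimated noise level instead of overwriting them outright.</p>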
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in CMIG 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.10396">arXiv:2202.10396</a> <span> [<a href="https://arxiv.org/pdf/2202.10396">pdf</a>, <a href="https://arxiv.org/format/2202.10396">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MIST GAN: Modality Imputation Using Style Transfer for MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Raju%2C+J+C">Jaya Chandra Raju</a>, <a href="/search/cs?searchtype=author&query=Gayatri%2C+K+S">Kompella Subha Gayatri</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Rangasami%2C+R">Rajeswaran Rangasami</a>, <a href="/search/cs?searchtype=author&query=Ramachandran%2C+R">Rajoo Ramachandran</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohansankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.10396v1-abstract-short" style="display: inline;"> MRI entails a great amount of cost, time and effort for the generation of all the modalities that are recommended for efficient diagnosis and treatment planning. Recent advancements in deep learning research show that generative models have achieved substantial improvement in the aspects of style transfer and image synthesis. In this work, we formulate generating the missing MR modality from exist… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.10396v1-abstract-full').style.display = 'inline'; document.getElementById('2202.10396v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.10396v1-abstract-full" style="display: none;"> MRI entails a great amount of cost, time and effort for the generation of all the modalities that are recommended for efficient diagnosis and treatment planning. Recent advancements in deep learning research show that generative models have achieved substantial improvement in the aspects of style transfer and image synthesis. In this work, we formulate generating the missing MR modality from existing MR modalities as an imputation problem using style transfer. With a multiple-to-one mapping, we model a network that accommodates domain specific styles in generating the target image. We analyse the style diversity both within and across MR modalities. Our model is tested on the BraTS'18 dataset and the results obtained are observed to be on par with the state-of-the-art in terms of visual metrics, SSIM and PSNR. After being evaluated by two expert radiologists, we show that our model is efficient, extendable, and suitable for clinical applications. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.10396v1-abstract-full').style.display = 'none'; document.getElementById('2202.10396v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.05055">arXiv:2111.05055</a> <span> [<a href="https://arxiv.org/pdf/2111.05055">pdf</a>, <a href="https://arxiv.org/format/2111.05055">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MAC-ReconNet: A Multiple Acquisition Context based Convolutional Neural Network for MR Image Reconstruction using Dynamic Weight Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ramanarayanan%2C+S">Sriprabha Ramanarayanan</a>, <a href="/search/cs?searchtype=author&query=Murugesan%2C+B">Balamurali Murugesan</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.05055v2-abstract-short" style="display: inline;"> Convolutional Neural network-based MR reconstruction methods have shown to provide fast and high quality reconstructions. A primary drawback with a CNN-based model is that it lacks flexibility and can effectively operate only for a specific acquisition context limiting practical applicability. By acquisition context, we mean a specific combination of three input settings considered namely, the ana… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.05055v2-abstract-full').style.display = 'inline'; document.getElementById('2111.05055v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.05055v2-abstract-full" style="display: none;"> Convolutional Neural network-based MR reconstruction methods have shown to provide fast and high quality reconstructions. A primary drawback with a CNN-based model is that it lacks flexibility and can effectively operate only for a specific acquisition context limiting practical applicability. By acquisition context, we mean a specific combination of three input settings considered namely, the anatomy under study, undersampling mask pattern and acceleration factor for undersampling. The model could be trained jointly on images combining multiple contexts. However the model does not meet the performance of context specific models nor extensible to contexts unseen at train time. This necessitates a modification to the existing architecture in generating context specific weights so as to incorporate flexibility to multiple contexts. 
We propose a multiple acquisition context based network, called MAC-ReconNet for MRI reconstruction, flexible to multiple acquisition contexts and generalizable to unseen contexts for applicability in real scenarios. The proposed network has an MRI reconstruction module and a dynamic weight prediction (DWP) module. The DWP module takes the corresponding acquisition context information as input and learns the context-specific weights of the reconstruction module which changes dynamically with context at run time. We show that the proposed approach can handle multiple contexts based on cardiac and brain datasets, Gaussian and Cartesian undersampling patterns and five acceleration factors. The proposed network outperforms the naive jointly trained model and gives competitive results with the context-specific models both quantitatively and qualitatively. We also demonstrate the generalizability of our model by testing on contexts unseen at train time. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.05055v2-abstract-full').style.display = 'none'; document.getElementById('2111.05055v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings of the Third Conference on Medical Imaging with Deep Learning, PMLR 121:696-708, 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.10400">arXiv:2103.10400</a> <span> [<a href="https://arxiv.org/pdf/2103.10400">pdf</a>, <a href="https://arxiv.org/format/2103.10400">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> RP-VIO: Robust Plane-based Visual-Inertial Odometry for Dynamic Environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ram%2C+K">Karnik Ram</a>, <a href="/search/cs?searchtype=author&query=Kharyal%2C+C">Chaitanya Kharyal</a>, <a href="/search/cs?searchtype=author&query=Harithas%2C+S+S">Sudarshan S. Harithas</a>, <a href="/search/cs?searchtype=author&query=Krishna%2C+K+M">K. Madhava Krishna</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.10400v2-abstract-short" style="display: inline;"> Modern visual-inertial navigation systems (VINS) are faced with a critical challenge in real-world deployment: they need to operate reliably and robustly in highly dynamic environments. Current best solutions merely filter dynamic objects as outliers based on the semantics of the object category. 
Such an approach does not scale as it requires semantic classifiers to encompass all possibly-moving o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.10400v2-abstract-full').style.display = 'inline'; document.getElementById('2103.10400v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.10400v2-abstract-full" style="display: none;"> Modern visual-inertial navigation systems (VINS) are faced with a critical challenge in real-world deployment: they need to operate reliably and robustly in highly dynamic environments. Current best solutions merely filter dynamic objects as outliers based on the semantics of the object category. Such an approach does not scale as it requires semantic classifiers to encompass all possibly-moving object classes; this is hard to define, let alone deploy. On the other hand, many real-world environments exhibit strong structural regularities in the form of planes such as walls and ground surfaces, which are also crucially static. We present RP-VIO, a monocular visual-inertial odometry system that leverages the simple geometry of these planes for improved robustness and accuracy in challenging dynamic environments. Since existing datasets have a limited number of dynamic elements, we also present a highly-dynamic, photorealistic synthetic dataset for a more effective evaluation of the capabilities of modern VINS systems. We evaluate our approach on this dataset, and three diverse sequences from standard datasets including two real-world dynamic sequences and show a significant improvement in robustness and accuracy over a state-of-the-art monocular visual-inertial odometry system. We also show in simulation an improvement over a simple dynamic-features masking approach. Our code and dataset are publicly available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.10400v2-abstract-full').style.display = 'none'; document.getElementById('2103.10400v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented at IROS 2021, code and dataset available at https://karnikram.info/rp-vio</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.05615">arXiv:2103.05615</a> <span> [<a href="https://arxiv.org/pdf/2103.05615">pdf</a>, <a href="https://arxiv.org/format/2103.05615">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Eternal-Thing 2.0: Analog-Trojan Resilient Ripple-Less Solar Energy Harvesting System for Sustainable IoT in Smart Cities and Smart Villages </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ram%2C+S+K">Saswat K. Ram</a>, <a href="/search/cs?searchtype=author&query=Sahoo%2C+S+R">Sauvagya R. 
Sahoo</a>, <a href="/search/cs?searchtype=author&query=Das%2C+B+B">Banee B. Das</a>, <a href="/search/cs?searchtype=author&query=Mahapatra%2C+K">Kamalakanta Mahapatra</a>, <a href="/search/cs?searchtype=author&query=Mohanty%2C+S+P">Saraju P. Mohanty</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2103.05615v1-abstract-full" style="display: inline;"> Harvesting natural energy has recently gained more attention than conventional approaches for sustainable Internet-of-Things (IoT). Meeting the system-on-chip (SoC) power requirement of IoT devices and generating higher voltages on-chip is a major challenge for on-chip peripherals and systems. Many sensors are employed for decision-making in smart cities and smart villages; their power supply is a concern and must be uninterrupted. Previously, we presented the Security-by-Design (SbD) principle to bring energy dissipation and cybersecurity together through our "Eternal-Thing". In this paper, a reliable on-chip energy harvesting system (EHS), called "Eternal-Thing 2.0", is designed for IoT end-node devices. The management section monitors the process load and also the recharging of the battery or super-capacitor. An efficient maximum power point tracking (MPPT) algorithm is used to avoid quiescent power consumption. The reliability of the proposed EHS is improved by using an aging-tolerant ring oscillator. The proposed EHS is designed and simulated in 90nm CMOS technology. The output voltage is within the range of 3-3.55V for an input of 1-1.5V. The EHS consumes 22 microwatts of power, which satisfies the ultra-low-power requirements of IoT smart nodes. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. 
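<p class="is-size-7">The abstract above mentions an efficient MPPT algorithm without specifying it; the classic perturb-and-observe loop below is shown purely to illustrate how MPPT works and is not the paper's on-chip implementation. The <code>read_voltage</code>, <code>read_current</code> and <code>set_voltage</code> callbacks are hypothetical.</p>
<pre><code>
# Illustrative perturb-and-observe MPPT loop: keep nudging the operating voltage
# in whichever direction increased the harvested power.
def perturb_and_observe(read_voltage, read_current, set_voltage, v_step=0.01, steps=1000):
    v = read_voltage()
    p_prev = v * read_current()
    direction = 1.0
    for _ in range(steps):
        v = v + direction * v_step
        set_voltage(v)
        p = read_voltage() * read_current()
        if p_prev > p:              # power dropped: reverse the perturbation direction
            direction = -direction
        p_prev = p
</code></pre>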
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 15 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.03690">arXiv:2103.03690</a> <span> [<a href="https://arxiv.org/pdf/2103.03690">pdf</a>, <a href="https://arxiv.org/format/2103.03690">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/BoKSS52540.2021.00013">10.1109/BoKSS52540.2021.00013 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Addressing Research Software Sustainability via Institutes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Katz%2C+D+S">Daniel S. Katz</a>, <a href="/search/cs?searchtype=author&query=Carver%2C+J+C">Jeffrey C. Carver</a>, <a href="/search/cs?searchtype=author&query=Hong%2C+N+P+C">Neil P. Chue Hong</a>, <a href="/search/cs?searchtype=author&query=Gesing%2C+S">Sandra Gesing</a>, <a href="/search/cs?searchtype=author&query=Hettrick%2C+S">Simon Hettrick</a>, <a href="/search/cs?searchtype=author&query=Honeyman%2C+T">Tom Honeyman</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Karthik Ram</a>, <a href="/search/cs?searchtype=author&query=Weber%2C+N">Nicholas Weber</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.03690v1-abstract-short" style="display: inline;"> Research software is essential to modern research, but it requires ongoing human effort to sustain: to continually adapt to changes in dependencies, to fix bugs, and to add new features. Software sustainability institutes, amongst others, develop, maintain, and disseminate best practices for research software sustainability, and build community around them. These practices can both reduce the amou… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.03690v1-abstract-full').style.display = 'inline'; document.getElementById('2103.03690v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.03690v1-abstract-full" style="display: none;"> Research software is essential to modern research, but it requires ongoing human effort to sustain: to continually adapt to changes in dependencies, to fix bugs, and to add new features. Software sustainability institutes, amongst others, develop, maintain, and disseminate best practices for research software sustainability, and build community around them. These practices can both reduce the amount of effort that is needed and create an environment where the effort is appreciated and rewarded. The UK SSI is such an institute, and the US URSSI and the Australian AuSSI are planning to become institutes, and this extended abstract discusses them and the strengths and weaknesses of this approach. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.03690v1-abstract-full').style.display = 'none'; document.getElementById('2103.03690v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted by ICSE 2021 BokSS Workshop (https://bokss.github.io/bokss2021/)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.05450">arXiv:2102.05450</a> <span> [<a href="https://arxiv.org/pdf/2102.05450">pdf</a>, <a href="https://arxiv.org/format/2102.05450">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Reference-based Texture transfer for Single Image Super-resolution of Magnetic Resonance images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=K%2C+M+M+K">Madhu Mithra K K</a>, <a href="/search/cs?searchtype=author&query=Ramanarayanan%2C+S">Sriprabha Ramanarayanan</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.05450v1-abstract-short" style="display: inline;"> Magnetic Resonance Imaging (MRI) is a valuable clinical diagnostic modality for spine pathologies with excellent characterization for infection, tumor, degenerations, fractures and herniations. However in surgery, image-guided spinal procedures continue to rely on CT and fluoroscopy, as MRI slice resolutions are typically insufficient. Building upon state-of-the-art single image super-resolution,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05450v1-abstract-full').style.display = 'inline'; document.getElementById('2102.05450v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.05450v1-abstract-full" style="display: none;"> Magnetic Resonance Imaging (MRI) is a valuable clinical diagnostic modality for spine pathologies with excellent characterization for infection, tumor, degenerations, fractures and herniations. However in surgery, image-guided spinal procedures continue to rely on CT and fluoroscopy, as MRI slice resolutions are typically insufficient. Building upon state-of-the-art single image super-resolution, we propose a reference-based, unpaired multi-contrast texture-transfer strategy for deep learning based in-plane and across-plane MRI super-resolution. 
We use the scattering transform to relate the texture features of image patches to unpaired reference image patches, and additionally introduce a loss term for multi-contrast texture. We apply our scheme in different super-resolution architectures, observing improvement in PSNR and SSIM for 4x super-resolution in most of the cases.
Submitted 10 February, 2021; originally announced February 2021.
Comments: Accepted at ISBI 2021
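The abstract above relates patch textures via the scattering transform and adds a multi-contrast texture loss, but does not give the formula. As a rough stand-in, the sketch below uses the classic Gram-matrix texture loss on feature maps; `sr_feat` and `ref_feat` are hypothetical features of a super-resolved patch and an unpaired reference patch from some fixed feature extractor, not the paper's actual formulation.

```python
import torch
import torch.nn.functional as F

def gram_matrix(feat):
    # feat: (B, C, H, W) feature maps of an image patch
    b, c, h, w = feat.shape
    f = feat.view(b, c, h * w)
    return torch.bmm(f, f.transpose(1, 2)) / (c * h * w)

def texture_loss(sr_feat, ref_feat):
    # L1 distance between Gram (texture) statistics of the two patches;
    # a simple stand-in for the scattering-transform-based texture matching.
    return F.l1_loss(gram_matrix(sr_feat), gram_matrix(ref_feat))
```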

arXiv:2007.07502 [pdf, other] eess.IV cs.CV cs.LG
Monocular Retinal Depth Estimation and Joint Optic Disc and Cup Segmentation using Adversarial Networks
Authors: Sharath M Shankaranarayana, Keerthi Ram, Kaushik Mitra, Mohanasankar Sivaprakasam
Abstract: One of the important parameters for the assessment of glaucoma is optic nerve head (ONH) evaluation, which usually involves depth estimation and subsequent optic disc and cup boundary extraction. Depth is usually obtained explicitly from imaging modalities like optical coherence tomography (OCT), and it is very challenging to estimate from a single RGB image. To this end, we propose a novel method using an adversarial network to predict the depth map from a single image. The proposed depth estimation technique is trained and evaluated using individual retinal images from the INSPIRE-stereo dataset. We obtain a very high average correlation coefficient of 0.92 upon five-fold cross validation, outperforming the state of the art. We then use the depth estimation process as a proxy task for joint optic disc and cup segmentation.
Submitted 15 July, 2020; originally announced July 2020.

arXiv:2006.08589 [pdf] cs.DL cs.SE
The role of metadata in reproducible computational research
Authors: Jeremy Leipzig, Daniel Nüst, Charles Tapley Hoyt, Stian Soiland-Reyes, Karthik Ram, Jane Greenberg
Abstract: Reproducible computational research (RCR) is the keystone of the scientific method for in silico analyses, packaging the transformation of raw data to published results. In addition to its role in research integrity, RCR has the capacity to significantly accelerate evaluation and reuse. This potential and wide support for the FAIR principles have motivated interest in metadata standards supporting RCR. Metadata provides context and provenance to raw data and methods and is essential to both discovery and validation.
Despite this shared connection with scientific data, few studies have explicitly described the relationship between metadata and RCR. This article employs a functional content analysis to identify metadata standards that support RCR functions across an analytic stack consisting of input data, tools, notebooks, pipelines, and publications. Our article provides background context, explores gaps, and discovers component trends of embeddedness and methodology weight from which we derive recommendations for future work.
Submitted 19 April, 2021; v1 submitted 15 June, 2020; originally announced June 2020.
Comments: 53 pages, 18 figures, 2 tables, 216 references

arXiv:2004.05319 [pdf, other] eess.IV cs.CV cs.LG
KD-MRI: A knowledge distillation framework for image reconstruction and image restoration in MRI workflow
Authors: Balamurali Murugesan, Sricharan Vijayarangan, Kaushik Sarveswaran, Keerthi Ram, Mohanasankar Sivaprakasam
Abstract: Deep learning networks are being developed in every stage of the MRI workflow and have provided state-of-the-art results. However, this has come at the cost of increased computation requirements and storage. Hence, replacing the networks with compact models at various stages in the MRI workflow can significantly reduce the required storage space and provide considerable speedup.
In computer vision, knowledge distillation is a commonly used method for model compression. In our work, we propose a knowledge distillation (KD) framework for image-to-image problems in the MRI workflow in order to develop compact, low-parameter models without a significant drop in performance. We propose a combination of the attention-based feature distillation method and imitation loss and demonstrate its effectiveness on the popular MRI reconstruction architecture, DC-CNN. We conduct extensive experiments using Cardiac, Brain, and Knee MRI datasets for 4x, 5x and 8x accelerations. We observed that the student network trained with the assistance of the teacher using our proposed KD framework provided significant improvement over the student network trained without assistance across all the datasets and acceleration factors. Specifically, for the Knee dataset, the student network achieves 65% parameter reduction, 2x faster CPU running time, and 1.5x faster GPU running time compared to the teacher. Furthermore, we compare our attention-based feature distillation method with other feature distillation methods. We also conduct an ablative study to understand the significance of attention-based distillation and imitation loss. We also extend our KD framework for MRI super-resolution and show encouraging results.
Submitted 11 April, 2020; originally announced April 2020.
Comments: Accepted in MIDL 2020. Code available
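KD-MRI combines an attention-based feature distillation term with an imitation loss against the teacher's output. A minimal PyTorch sketch of such an objective follows, assuming the common sum-of-squared-activations attention map; the loss weights and the exact attention formulation are assumptions, not the paper's definition.

```python
import torch
import torch.nn.functional as F

def attention_map(feat, eps=1e-8):
    # Collapse channels of a (B, C, H, W) feature map into a normalized
    # spatial attention map of shape (B, H*W).
    amap = feat.pow(2).mean(dim=1).flatten(1)
    return amap / (amap.norm(p=2, dim=1, keepdim=True) + eps)

def kd_loss(student_out, teacher_out, target,
            student_feats, teacher_feats,
            alpha=1.0, beta=1.0, gamma=1000.0):
    # Reconstruction loss against ground truth, imitation loss against the
    # teacher's output, and attention-based distillation between matched
    # intermediate feature maps (weights are illustrative placeholders).
    recon = F.l1_loss(student_out, target)
    imitation = F.l1_loss(student_out, teacher_out)
    attn = sum(F.mse_loss(attention_map(s), attention_map(t))
               for s, t in zip(student_feats, teacher_feats))
    return alpha * recon + beta * imitation + gamma * attn
```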

arXiv:2004.02755 [pdf, other] cs.CV cs.LG cs.NE stat.ML
Detection and skeletonization of single neurons and tracer injections using topological methods
Authors: Dingkang Wang, Lucas Magee, Bing-Xing Huo, Samik Banerjee, Xu Li, Jaikishan Jayakumar, Meng Kuan Lin, Keerthi Ram, Suyi Wang, Yusu Wang, Partha P. Mitra
Abstract: Neuroscientific data analysis has traditionally relied on linear algebra and stochastic process theory. However, the tree-like shapes of neurons cannot be described easily as points in a vector space (the subtraction of two neuronal shapes is not a meaningful operation), and methods from computational topology are better suited to their analysis. Here we introduce methods from Discrete Morse (DM) Theory to extract the tree-skeletons of individual neurons from volumetric brain image data, and to summarize collections of neurons labelled by tracer injections.
Since individual neurons are topologically trees, it is sensible to summarize the collection of neurons using a consensus tree-shape that provides a richer information summary than the traditional regional 'connectivity matrix' approach. The conceptually elegant DM approach lacks hand-tuned parameters and captures global properties of the data, as opposed to previous approaches which are inherently local. For individual skeletonization of sparsely labelled neurons we obtain substantial performance gains over state-of-the-art non-topological methods (over 10% improvements in precision and faster proofreading). The consensus-tree summary of tracer injections incorporates the regional connectivity matrix information, but in addition captures the collective collateral branching patterns of the set of neurons connected to the injection site, and provides a bridge between single-neuron morphology and tracer-injection data.
Submitted 20 March, 2020; originally announced April 2020.
Comments: 20 pages (14 pages main-text and 6 pages supplementary information). 5 main-text figures. 5 supplementary figures. 2 supplementary tables

arXiv:2002.11626 [pdf, other] cs.DL
A Realistic Guide to Making Data Available Alongside Code to Improve Reproducibility
Authors: Nicholas J Tierney, Karthik Ram
Abstract: Data makes science possible. Sharing data improves visibility, and makes the research process transparent.
This increases trust in the work, and allows for independent reproduction of results. However, a large proportion of data from published research is often only available to the original authors. Despite the obvious benefits of sharing data, and scientists advocating for the importance of sharing data, most advice on sharing data discusses its broader benefits rather than the practical considerations of sharing. This paper provides practical, actionable advice on how to actually share data alongside research. The key message is that sharing data falls on a continuum, and entering it should come with minimal barriers.
Submitted 6 February, 2020; originally announced February 2020.
Comments: Both authors contributed equally to the work, 35 pages, 7 figures, 3 tables

arXiv:2001.10641 [pdf, other] cs.SE cs.DC  doi: 10.32614/RJ-2020-007
The Rockerverse: Packages and Applications for Containerization with R
Authors: Daniel Nüst, Dirk Eddelbuettel, Dom Bennett, Robrecht Cannoodt, Dav Clark, Gergely Daroczi, Mark Edmondson, Colin Fay, Ellis Hughes, Lars Kjeldgaard, Sean Lopp, Ben Marwick, Heather Nolis, Jacqueline Nolis, Hong Ooi,
href="/search/cs?searchtype=author&query=Ram%2C+K">Karthik Ram</a>, <a href="/search/cs?searchtype=author&query=Ross%2C+N">Noam Ross</a>, <a href="/search/cs?searchtype=author&query=Shepherd%2C+L">Lori Shepherd</a>, <a href="/search/cs?searchtype=author&query=S%C3%B3lymos%2C+P">P茅ter S贸lymos</a>, <a href="/search/cs?searchtype=author&query=Swetnam%2C+T+L">Tyson Lee Swetnam</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+N">Nitesh Turaga</a>, <a href="/search/cs?searchtype=author&query=Van+Petegem%2C+C">Charlotte Van Petegem</a>, <a href="/search/cs?searchtype=author&query=Williams%2C+J">Jason Williams</a>, <a href="/search/cs?searchtype=author&query=Willis%2C+C">Craig Willis</a>, <a href="/search/cs?searchtype=author&query=Xiao%2C+N">Nan Xiao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2001.10641v4-abstract-short" style="display: inline;"> The Rocker Project provides widely used Docker images for R across different application scenarios. This article surveys downstream projects that build upon the Rocker Project images and presents the current state of R packages for managing Docker images and controlling containers. These use cases cover diverse topics such as package development, reproducible research, collaborative work, cloud-ba… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.10641v4-abstract-full').style.display = 'inline'; document.getElementById('2001.10641v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2001.10641v4-abstract-full" style="display: none;"> The Rocker Project provides widely used Docker images for R across different application scenarios. This article surveys downstream projects that build upon the Rocker Project images and presents the current state of R packages for managing Docker images and controlling containers. These use cases cover diverse topics such as package development, reproducible research, collaborative work, cloud-based data processing, and production deployment of services. The variety of applications demonstrates the power of the Rocker Project specifically and containerisation in general. Across the diverse ways to use containers, we identified common themes: reproducible environments, scalability and efficiency, and portability across clouds. We conclude that the current growth and diversification of use cases is likely to continue its positive impact, but see the need for consolidating the Rockerverse ecosystem of packages, developing common practices for applications, and exploring alternative containerisation software. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.10641v4-abstract-full').style.display = 'none'; document.getElementById('2001.10641v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 January, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Source code for article available at https://github.com/nuest/rockerverse-paper/ Updated version includes some new paragraphs and corrections throughout the text; full diff available at https://github.com/nuest/rockerverse-paper/compare/preprint.v2...preprint.v3</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68N01 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> D.2.6; D.2.7; K.6.3 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> The R Journal (2020), 12:1, pages 437-461 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2001.02397">arXiv:2001.02397</a> <span> [<a href="https://arxiv.org/pdf/2001.02397">pdf</a>, <a href="https://arxiv.org/format/2001.02397">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DC-WCNN: A deep cascade of wavelet based convolutional neural networks for MR Image Reconstruction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ramanarayanan%2C+S">Sriprabha Ramanarayanan</a>, <a href="/search/cs?searchtype=author&query=Murugesan%2C+B">Balamurali Murugesan</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2001.02397v1-abstract-short" style="display: inline;"> Several variants of Convolutional Neural Networks (CNN) have been developed for Magnetic Resonance (MR) image reconstruction. Among them, U-Net has shown to be the baseline architecture for MR image reconstruction. However, sub-sampling is performed by its pooling layers, causing information loss which in turn leads to blur and missing fine details in the reconstructed image. We propose a modifica… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.02397v1-abstract-full').style.display = 'inline'; document.getElementById('2001.02397v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2001.02397v1-abstract-full" style="display: none;"> Several variants of Convolutional Neural Networks (CNN) have been developed for Magnetic Resonance (MR) image reconstruction. Among them, U-Net has shown to be the baseline architecture for MR image reconstruction. However, sub-sampling is performed by its pooling layers, causing information loss which in turn leads to blur and missing fine details in the reconstructed image. We propose a modification to the U-Net architecture to recover fine structures. The proposed network is a wavelet packet transform based encoder-decoder CNN with residual learning called CNN. 
The proposed WCNN uses a discrete wavelet transform in place of pooling layers, an inverse wavelet transform in place of unpooling layers, and residual connections. We also propose a deep cascaded framework (DC-WCNN) which consists of cascades of WCNN and k-space data fidelity units to achieve high quality MR reconstruction. Experimental results show that WCNN and DC-WCNN give promising results in terms of evaluation metrics and better recovery of fine details as compared to other methods.
Submitted 8 January, 2020; originally announced January 2020.
Comments: Accepted in ISBI 2020
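The WCNN idea above replaces pooling with a discrete wavelet transform and unpooling with its inverse, so no information is discarded when resolution drops. A minimal sketch of that idea with a 2x2 Haar transform is below; the actual wavelet and packet decomposition used in the paper may differ.

```python
import torch
import torch.nn as nn

class HaarDWT(nn.Module):
    """Pooling replacement: (B, C, H, W) -> (B, 4C, H/2, W/2), losslessly."""
    def forward(self, x):
        a = x[:, :, 0::2, 0::2]   # top-left of each 2x2 block
        b = x[:, :, 0::2, 1::2]   # top-right
        c = x[:, :, 1::2, 0::2]   # bottom-left
        d = x[:, :, 1::2, 1::2]   # bottom-right
        ll = (a + b + c + d) / 2
        lh = (-a - b + c + d) / 2
        hl = (-a + b - c + d) / 2
        hh = (a - b - c + d) / 2
        return torch.cat([ll, lh, hl, hh], dim=1)

class HaarIDWT(nn.Module):
    """Unpooling replacement: exact inverse, (B, 4C, H, W) -> (B, C, 2H, 2W)."""
    def forward(self, y):
        ll, lh, hl, hh = torch.chunk(y, 4, dim=1)
        a = (ll - lh - hl + hh) / 2
        b = (ll - lh + hl - hh) / 2
        c = (ll + lh - hl - hh) / 2
        d = (ll + lh + hl + hh) / 2
        out = torch.zeros(ll.size(0), ll.size(1), ll.size(2) * 2, ll.size(3) * 2,
                          device=y.device, dtype=y.dtype)
        out[:, :, 0::2, 0::2] = a
        out[:, :, 0::2, 1::2] = b
        out[:, :, 1::2, 0::2] = c
        out[:, :, 1::2, 1::2] = d
        return out
```

Because the Haar transform is orthonormal, `HaarIDWT()(HaarDWT()(x))` reconstructs `x` exactly, which is the property that motivates swapping it in for pooling/unpooling.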

arXiv:2001.02387 [pdf, other] eess.IV cs.CV
A context based deep learning approach for unbalanced medical image segmentation
Authors: Balamurali Murugesan, Kaushik Sarveswaran, Vijaya Raghavan S, Sharath M Shankaranarayana, Keerthi Ram, Mohanasankar Sivaprakasam
Abstract: Automated medical image segmentation is an important step in many medical procedures. Recently, deep learning networks have been widely used for various medical image segmentation tasks, with U-Net and generative adversarial nets (GANs) being some of the commonly used ones. Foreground-background class imbalance is a common occurrence in medical images, and U-Net has difficulty in handling class imbalance because of its cross entropy (CE) objective function. Similarly, GANs also suffer from class imbalance because the discriminator looks at the entire image to classify it as real or fake. Since the discriminator is essentially a deep learning classifier, it is incapable of correctly identifying minor changes in small structures. To address these issues, we propose a novel context based CE loss function for U-Net, and a novel architecture, Seg-GLGAN. The context based CE is a linear combination of CE obtained over the entire image and its region of interest (ROI). In Seg-GLGAN, we introduce a novel context discriminator to which the entire image and its ROI are fed as input, thus enforcing local context. We conduct extensive experiments using two challenging unbalanced datasets: PROMISE12 and ACDC. We observe that segmentation results obtained from our methods give better segmentation metrics as compared to various baseline methods.
Submitted 8 January, 2020; originally announced January 2020.
Comments: Accepted in ISBI 2020
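The context based CE loss above is stated to be a linear combination of CE over the whole image and CE over its ROI. A minimal PyTorch sketch, assuming a single rectangular ROI per batch and an illustrative mixing weight `lam` (both assumptions for illustration):

```python
import torch.nn.functional as F

def context_ce_loss(logits, target, roi_box, lam=0.5):
    """Linear combination of CE over the full image and CE over its ROI.

    logits: (B, num_classes, H, W); target: (B, H, W) integer labels;
    roi_box: (y0, y1, x0, x1) crop around the region of interest.
    """
    ce_full = F.cross_entropy(logits, target)
    y0, y1, x0, x1 = roi_box
    ce_roi = F.cross_entropy(logits[:, :, y0:y1, x0:x1],
                             target[:, y0:y1, x0:x1])
    return lam * ce_full + (1.0 - lam) * ce_roi
```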

arXiv:1908.09262 [pdf, other] eess.IV cs.CV
Recon-GLGAN: A Global-Local context based Generative Adversarial Network for MRI Reconstruction
Authors: Balamurali Murugesan, Vijaya Raghavan S, Kaushik Sarveswaran, Keerthi Ram, Mohanasankar Sivaprakasam
Abstract: Magnetic resonance imaging (MRI) is one of the best medical imaging modalities as it offers excellent spatial resolution and soft-tissue contrast. However, the usage of MRI is limited by its slow acquisition time, which makes it expensive and causes patient discomfort. In order to accelerate the acquisition, multiple deep learning networks have been proposed. Recently, Generative Adversarial Networks (GANs) have shown promising results in MRI reconstruction. The drawback of the proposed GAN-based methods is that they do not incorporate prior information about the end goal, which could help in better reconstruction. For instance, in the case of cardiac MRI, the physician would be interested in the heart region, which is of diagnostic relevance, while excluding the peripheral regions. In this work, we show that incorporating prior information about a region of interest in the model would offer better performance. Thereby, we propose a novel GAN based architecture, Reconstruction Global-Local GAN (Recon-GLGAN), for MRI reconstruction. The proposed model contains a generator and a context discriminator which incorporates global and local contextual information from images. Our model offers significant performance improvement over the baseline models. Our experiments show that the concept of a context discriminator can be extended to existing GAN based reconstruction models to offer better performance. We also demonstrate that the reconstructions from the proposed method give segmentation results similar to fully sampled images.
Submitted 25 August, 2019; originally announced August 2019.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at MLMIR-MICCAIW 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1908.05311">arXiv:1908.05311</a> <span> [<a href="https://arxiv.org/pdf/1908.05311">pdf</a>, <a href="https://arxiv.org/format/1908.05311">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Conv-MCD: A Plug-and-Play Multi-task Module for Medical Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Murugesan%2C+B">Balamurali Murugesan</a>, <a href="/search/cs?searchtype=author&query=Sarveswaran%2C+K">Kaushik Sarveswaran</a>, <a href="/search/cs?searchtype=author&query=Shankaranarayana%2C+S+M">Sharath M Shankaranarayana</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Joseph%2C+J">Jayaraj Joseph</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1908.05311v1-abstract-short" style="display: inline;"> For the task of medical image segmentation, fully convolutional network (FCN) based architectures have been extensively used with various modifications. A rising trend in these architectures is to employ joint-learning of the target region with an auxiliary task, a method commonly known as multi-task learning. These approaches help impose smoothness and shape priors, which vanilla FCN approaches d… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.05311v1-abstract-full').style.display = 'inline'; document.getElementById('1908.05311v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1908.05311v1-abstract-full" style="display: none;"> For the task of medical image segmentation, fully convolutional network (FCN) based architectures have been extensively used with various modifications. A rising trend in these architectures is to employ joint-learning of the target region with an auxiliary task, a method commonly known as multi-task learning. These approaches help impose smoothness and shape priors, which vanilla FCN approaches do not necessarily incorporate. In this paper, we propose a novel plug-and-play module, which we term as Conv-MCD, which exploits structural information in two ways - i) using the contour map and ii) using the distance map, both of which can be obtained from ground truth segmentation maps with no additional annotation costs. The key benefit of our module is the ease of its addition to any state-of-the-art architecture, resulting in a significant improvement in performance with a minimal increase in parameters. To substantiate the above claim, we conduct extensive experiments using 4 state-of-the-art architectures across various evaluation metrics, and report a significant increase in performance in relation to the base networks. 

arXiv:1908.05311 [pdf, other] cs.CV
Conv-MCD: A Plug-and-Play Multi-task Module for Medical Image Segmentation
Authors: Balamurali Murugesan, Kaushik Sarveswaran, Sharath M Shankaranarayana, Keerthi Ram, Jayaraj Joseph, Mohanasankar Sivaprakasam
Abstract: For the task of medical image segmentation, fully convolutional network (FCN) based architectures have been extensively used with various modifications. A rising trend in these architectures is to employ joint-learning of the target region with an auxiliary task, a method commonly known as multi-task learning. These approaches help impose smoothness and shape priors, which vanilla FCN approaches do not necessarily incorporate. In this paper, we propose a novel plug-and-play module, which we term Conv-MCD, which exploits structural information in two ways: i) using the contour map and ii) using the distance map, both of which can be obtained from ground truth segmentation maps with no additional annotation costs. The key benefit of our module is the ease of its addition to any state-of-the-art architecture, resulting in a significant improvement in performance with a minimal increase in parameters. To substantiate the above claim, we conduct extensive experiments using 4 state-of-the-art architectures across various evaluation metrics, and report a significant increase in performance in relation to the base networks. In addition to the aforementioned experiments, we also perform ablative studies and visualization of feature maps to further elucidate our approach.
Submitted 14 August, 2019; originally announced August 2019.
Comments: Accepted in MLMI 2019
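Conv-MCD's two auxiliary targets, the contour map and the distance map, are derived from the ground-truth mask at no extra annotation cost. One plausible construction with SciPy is sketched below; the paper's exact definitions (signed vs. unsigned, normalised or not) may differ.

```python
import numpy as np
from scipy import ndimage

def contour_and_distance_maps(mask):
    """Derive contour and distance targets from a binary ground-truth mask (H, W)."""
    mask = mask.astype(bool)
    eroded = ndimage.binary_erosion(mask)
    contour = mask ^ eroded                        # one-pixel object boundary
    # Signed-style distance map: distance to background inside the object,
    # negative distance to the object outside it (normalisation omitted).
    dist_in = ndimage.distance_transform_edt(mask)
    dist_out = ndimage.distance_transform_edt(~mask)
    distance = dist_in - dist_out
    return contour.astype(np.float32), distance.astype(np.float32)
```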

arXiv:1903.12536 [pdf, other] cs.LG eess.SP stat.ML
Deep Network for Capacitive ECG Denoising
Authors: Vignesh Ravichandran, Balamurali Murugesan, Sharath M Shankaranarayana, Keerthi Ram, Preejith S. P, Jayaraj Joseph, Mohanasankar Sivaprakasam
Abstract: Continuous monitoring of cardiac health under free living conditions is crucial to provide effective care for patients undergoing post operative recovery and individuals with high cardiac risk, like the elderly. Capacitive Electrocardiogram (cECG) is one such technology which allows comfortable and long term monitoring through its ability to measure biopotential without skin contact. cECG monitoring can be done using many household objects like chairs, beds and even car seats, allowing for seamless monitoring of individuals. This method is unfortunately highly susceptible to motion artifacts, which greatly limit its usage in clinical practice. The current use of cECG systems has been limited to performing rhythmic analysis. In this paper we propose a novel end-to-end deep learning architecture to perform the task of denoising capacitive ECG. The proposed network is trained using motion corrupted three channel cECG and a reference LEAD I ECG collected on individuals while driving a car. Further, we also propose a novel joint loss function to apply loss on both signal and frequency domain. We conduct extensive rhythmic analysis on the model predictions and the ground truth. We further evaluate the signal denoising using Mean Square Error (MSE) and Cross Correlation between model predictions and ground truth. We report MSE of 0.167 and Cross Correlation of 0.476. The reported results highlight the feasibility of performing morphological analysis using the filtered cECG. The proposed approach can allow for continuous and comprehensive monitoring of individuals in free living conditions.
Submitted 29 March, 2019; originally announced March 2019.
Comments: Accepted IEEE MEMEA 2019
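The cECG denoiser above is trained with a joint loss applied in both the signal and the frequency domain. A minimal sketch of such an objective, assuming MSE on the raw segment plus MSE on magnitude spectra with illustrative weights (the paper's exact weighting and spectral representation are not specified here):

```python
import torch
import torch.nn.functional as F

def joint_time_frequency_loss(pred, target, w_time=1.0, w_freq=1.0):
    """Loss in both the signal domain and the frequency domain.

    pred, target: (B, L) denoised and reference ECG segments.
    """
    time_loss = F.mse_loss(pred, target)
    pred_spec = torch.abs(torch.fft.rfft(pred, dim=-1))
    target_spec = torch.abs(torch.fft.rfft(target, dim=-1))
    freq_loss = F.mse_loss(pred_spec, target_spec)
    return w_time * time_loss + w_freq * freq_loss
```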

arXiv:1902.04236 [pdf, other] eess.SP cs.CV cs.LG
RespNet: A deep learning model for extraction of respiration from photoplethysmogram
Authors: Vignesh Ravichandran, Balamurali Murugesan, Vaishali Balakarthikeyan, Sharath M Shankaranarayana, Keerthi Ram, Preejith S. P, Jayaraj Joseph, Mohanasankar Sivaprakasam
Abstract: Respiratory ailments afflict a wide range of people and manifest themselves through conditions like asthma and sleep apnea. Continuous monitoring of chronic respiratory ailments is seldom used outside the intensive care ward due to the large size and cost of the monitoring system. While Electrocardiogram (ECG) based respiration extraction is a validated approach, its adoption is limited by access to a suitable continuous ECG monitor. Recently, due to the widespread adoption of wearable smartwatches with in-built Photoplethysmogram (PPG) sensors, PPG is being considered as a viable candidate for continuous and unobtrusive respiration monitoring. Research in this domain, however, has been predominantly focussed on estimating respiration rate from PPG. In this work, a novel end-to-end deep learning network called RespNet is proposed to perform the task of extracting the respiration signal from a given input PPG, as opposed to extracting the respiration rate. The proposed network was trained and tested on two different datasets utilizing different modalities of reference respiration signal recordings. Also, the similarity and performance of the proposed network against two conventional signal processing approaches for extracting the respiration signal were studied. The proposed method was tested on two independent datasets with a Mean Squared Error of 0.262 and 0.145. The Cross-Correlation coefficients of the respective datasets were found to be 0.933 and 0.931. The reported errors and similarity were found to be better than those of conventional approaches. The proposed approach would aid clinicians in providing comprehensive evaluation of sleep-related respiratory conditions and chronic respiratory ailments while being comfortable and inexpensive for the patient.
Submitted 20 February, 2019; v1 submitted 11 February, 2019; originally announced February 2019.
Comments: Under review at EMBC
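Both the cECG and RespNet entries above report results as MSE and cross-correlation between the predicted and reference signals. A small evaluation helper is sketched below, assuming zero-lag Pearson correlation is what is meant by cross-correlation (the papers do not spell out the exact estimator here).

```python
import numpy as np

def mse(pred, ref):
    # Mean squared error between predicted and reference signals.
    return float(np.mean((pred - ref) ** 2))

def cross_correlation(pred, ref):
    # Zero-lag Pearson correlation between the two standardized signals.
    pred = (pred - pred.mean()) / (pred.std() + 1e-12)
    ref = (ref - ref.mean()) / (ref.std() + 1e-12)
    return float(np.mean(pred * ref))
```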
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1902.01040">arXiv:1902.01040</a> <span> [<a href="https://arxiv.org/pdf/1902.01040">pdf</a>, <a href="https://arxiv.org/format/1902.01040">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Fully Convolutional Networks for Monocular Retinal Depth Estimation and Optic Disc-Cup Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shankaranarayana%2C+S+M">Sharath M Shankaranarayana</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Mitra%2C+K">Kaushik Mitra</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Glaucoma is a serious ocular disorder for which the screening and diagnosis are carried out by the examination of the optic nerve head (ONH). The color fundus image (CFI) is the most common modality used for ocular screening. In CFI, the central r… </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 February, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under review in IEEE JBHI</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.08824">arXiv:1901.08824</a> <span> [<a href="https://arxiv.org/pdf/1901.08824">pdf</a>, <a href="https://arxiv.org/format/1901.08824">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Joint shape learning and segmentation for medical images using a minimalistic deep network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Murugesan%2C+B">Balamurali Murugesan</a>, <a href="/search/cs?searchtype=author&query=Sarveswaran%2C+K">Kaushik Sarveswaran</a>, <a href="/search/cs?searchtype=author&query=Shankaranarayana%2C+S+M">Sharath M Shankaranarayana</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Keerthi Ram</a>, <a href="/search/cs?searchtype=author&query=Sivaprakasam%2C+M">Mohanasankar Sivaprakasam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Recently, state-of-the-art results have been achieved in semantic segmentation using fully convolutional networks (FCNs). Most of these networks employ an encoder-decoder style architecture similar to U-Net and are trained with images and the corresponding segmentation maps as a pixel-wise classification task. Such frameworks only exploit class information, by using the ground truth segmentation maps. In this paper, we propose a multi-task learning framework with the main aim of exploiting structural and spatial information along with the class information. We modify the decoder part of the FCN to exploit the structural information as well as the class information, while keeping the number of parameters of the network as low as possible. We obtain the structural information in either of two ways: i) using the contour map or ii) using the distance map, both of which can be obtained from the ground truth segmentation maps with no additional annotation cost. We also explore different ways in which distance maps can be computed and study their effect on segmentation performance. We experiment extensively on two different medical image segmentation applications: i) optic disc and cup segmentation in color fundus images and ii) polyp segmentation in endoscopic images. Through our experiments, we report results comparable to, and in some cases better than, the current state-of-the-art architectures, with about a 2x reduction in the number of parameters. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under review at MIDL 2019</span> </p>
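<p class="is-size-7">The contour and distance targets mentioned in the abstract above can indeed be derived from an existing ground-truth mask with no extra annotation. The snippet below shows one common construction (a morphological gradient for the contour, a Euclidean distance transform for the distance map); the paper explores several distance-map variants, so treat this as an illustrative assumption rather than the exact definition used.</p>
<pre><code class="language-python">
# Hedged sketch: derive contour and distance-map targets from a binary ground-truth mask.
import numpy as np
from scipy import ndimage

def structural_targets(mask):
    """mask: binary (H, W) ground-truth segmentation."""
    mask = mask.astype(bool)
    # Contour map: pixels where the mask differs from its erosion (the object boundary).
    contour = mask ^ ndimage.binary_erosion(mask)
    # Distance map: distance of every foreground pixel to the nearest background pixel.
    distance = ndimage.distance_transform_edt(mask)
    return contour.astype(np.float32), distance.astype(np.float32)
</code></pre>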
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.04406">arXiv:1811.04406</a> <span> [<a href="https://arxiv.org/pdf/1811.04406">pdf</a>, <a href="https://arxiv.org/format/1811.04406">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a href="https://doi.org/10.1145/3293353.3293383">10.1145/3293353.3293383</a></span> </div> </div> </div> <p class="title is-5 mathjax"> HSD-CNN: Hierarchically self decomposing CNN architecture using class specific filter sensitivity analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ram%2C+K+S">K. Sai Ram</a>, <a href="/search/cs?searchtype=author&query=Mukherjee%2C+J">Jayanta Mukherjee</a>, <a href="/search/cs?searchtype=author&query=Patra%2C+A">Amit Patra</a>, <a href="/search/cs?searchtype=author&query=Das%2C+P+P">Partha Pratim Das</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Conventional convolutional neural networks (CNNs) are trained on large domain datasets and are hence typically over-represented and inefficient in limited-class applications. An efficient way to convert such large many-class pre-trained networks into small few-class networks is through a hierarchical decomposition of their feature maps.
To this end, we propose an automated framework for such decomposition, the Hierarchically Self Decomposing CNN (HSD-CNN), constructed in four steps. HSD-CNN is derived automatically using a class-specific filter sensitivity analysis that quantifies the impact of specific features on a class prediction. The decomposed hierarchical network can be utilized and deployed directly to obtain sub-networks for a subset of classes, and it is shown to perform better without requiring these sub-networks to be retrained. Experimental results show that HSD-CNN generally does not degrade accuracy if the full set of classes is used. Interestingly, when operating on known subsets of classes, HSD-CNN improves accuracy with a much smaller model size, requiring far fewer operations. The HSD-CNN flow is verified on the CIFAR10, CIFAR100 and CALTECH101 data sets. We report accuracies up to $85.6\%$ ($94.75\%$) on scenarios with 13 (4) classes of CIFAR100, using a pre-trained VGG-16 network on the full data set. In this case, the proposed HSD-CNN requires $3.97 \times$ fewer parameters and saves $71.22\%$ of operations, in comparison to the baseline VGG-16 containing features for all 100 classes. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in ICVGIP, 2018</span> </p>
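<p class="is-size-7">A class-specific filter sensitivity analysis of the kind named in the title can be approximated by ablating one filter at a time and measuring how much the confidence on a target class drops. The helper below is a hypothetical sketch of that idea only; the hook-based zeroing and the confidence-drop score are illustrative assumptions, not the paper's four-step procedure.</p>
<pre><code class="language-python">
# Hedged sketch: score how much silencing one convolutional filter hurts the network's
# softmax confidence on images of a single class.
import torch

@torch.no_grad()
def filter_sensitivity(model, conv_layer, filter_idx, class_idx, images):
    """Mean drop in probability of `class_idx` when `filter_idx` of `conv_layer` is zeroed."""
    def zero_filter(module, inputs, output):
        output[:, filter_idx] = 0.0      # silence one filter's activation map
        return output

    baseline = torch.softmax(model(images), dim=1)[:, class_idx]
    handle = conv_layer.register_forward_hook(zero_filter)
    ablated = torch.softmax(model(images), dim=1)[:, class_idx]
    handle.remove()
    return (baseline - ablated).mean().item()

# Hypothetical usage: rank the filters of one VGG-16 conv layer by sensitivity to class 3.
# model = torchvision.models.vgg16(weights="IMAGENET1K_V1").eval()
# conv = model.features[0]
# scores = [filter_sensitivity(model, conv, f, 3, batch) for f in range(conv.out_channels)]
</code></pre>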
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in ICVGIP,2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1810.13040">arXiv:1810.13040</a> <span> [<a href="https://arxiv.org/pdf/1810.13040">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Enforcing public data archiving policies in academic publishing: A study of ecology journals </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sholler%2C+D">Dan Sholler</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Karthik Ram</a>, <a href="/search/cs?searchtype=author&query=Boettiger%2C+C">Carl Boettiger</a>, <a href="/search/cs?searchtype=author&query=Katz%2C+D+S">Daniel S. Katz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1810.13040v1-abstract-short" style="display: inline;"> To improve the quality and efficiency of research, groups within the scientific community seek to exploit the value of data sharing. Funders, institutions, and specialist organizations are developing and implementing strategies to encourage or mandate data sharing within and across disciplines, with varying degrees of success. Academic journals in ecology and evolution have adopted several types o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.13040v1-abstract-full').style.display = 'inline'; document.getElementById('1810.13040v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1810.13040v1-abstract-full" style="display: none;"> To improve the quality and efficiency of research, groups within the scientific community seek to exploit the value of data sharing. Funders, institutions, and specialist organizations are developing and implementing strategies to encourage or mandate data sharing within and across disciplines, with varying degrees of success. Academic journals in ecology and evolution have adopted several types of public data archiving policies requiring authors to make data underlying scholarly manuscripts freely available. Yet anecdotes from the community and studies evaluating data availability suggest that these policies have not obtained the desired effects, both in terms of quantity and quality of available datasets. We conducted a qualitative, interview-based study with journal editorial staff and other stakeholders in the academic publishing process to examine how journals enforce data archiving policies. We specifically sought to establish who editors and other stakeholders perceive as responsible for ensuring data completeness and quality in the peer review process. Our analysis revealed little consensus with regard to how data archiving policies should be enforced and who should hold authors accountable for dataset submissions. Themes in interviewee responses included hopefulness that reviewers would take the initiative to review datasets and trust in authors to ensure the completeness and quality of their datasets. 
We highlight problematic aspects of these thematic responses and offer potential starting points for improvement of the public data archiving process. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">35 pages, 1 figure, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1803.08181">arXiv:1803.08181</a> <span> [<a href="https://arxiv.org/pdf/1803.08181">pdf</a>, <a href="https://arxiv.org/format/1803.08181">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a href="https://doi.org/10.1109/IROS.2018.8593693">10.1109/IROS.2018.8593693</a></span> </div> </div> </div> <p class="title is-5 mathjax"> CalibNet: Geometrically Supervised Extrinsic Calibration using 3D Spatial Transformer Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Iyer%2C+G">Ganesh Iyer</a>, <a href="/search/cs?searchtype=author&query=Ram.%2C+R+K">R. Karnik Ram</a>, <a href="/search/cs?searchtype=author&query=Murthy%2C+J+K">J. Krishna Murthy</a>, <a href="/search/cs?searchtype=author&query=Krishna%2C+K+M">K. Madhava Krishna</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> 3D LiDARs and 2D cameras are increasingly being used alongside each other in sensor rigs for perception tasks. Before these sensors can be used to gather meaningful data, however, their extrinsics (and intrinsics) need to be accurately calibrated, as the performance of the sensor rig is extremely sensitive to these calibration parameters.
A vast majority of existing calibration techniques require significant amounts of data and/or calibration targets and human effort, severely impacting their applicability in large-scale production systems. We address this gap with CalibNet: a self-supervised deep network capable of automatically estimating the 6-DoF rigid body transformation between a 3D LiDAR and a 2D camera in real-time. CalibNet alleviates the need for calibration targets, thereby resulting in significant savings in calibration effort. During training, the network only takes as input a LiDAR point cloud, the corresponding monocular image, and the camera calibration matrix K. At train time, we do not impose direct supervision (i.e., we do not directly regress to the calibration parameters). Instead, we train the network to predict calibration parameters that maximize the geometric and photometric consistency of the input images and point clouds. CalibNet learns to iteratively solve the underlying geometric problem and accurately predicts extrinsic calibration parameters for a wide range of mis-calibrations, without requiring retraining or domain adaptation. The project page is hosted at https://epiception.github.io/CalibNet </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 March, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Appeared in the proceedings of the IEEE International Conference on Intelligent Robots and Systems (IROS) 2018</span> </p>
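<p class="is-size-7">The self-supervised idea described above, predicting extrinsics that make the projected point cloud agree with the image, can be illustrated with a simple geometric consistency term. The function below is a hedged sketch: it projects LiDAR points with a candidate transform and compares their depths with a reference depth map. The photometric term and the iterative refinement mentioned in the abstract are omitted, and the nearest-pixel lookup is a simplification; a practical implementation would use a differentiable sampler.</p>
<pre><code class="language-python">
# Hedged sketch of a geometric consistency loss for LiDAR-camera extrinsic calibration.
import torch

def geometric_consistency_loss(points, T_pred, K, target_depth):
    """points: (N, 3) LiDAR points; T_pred: (4, 4) predicted extrinsic;
    K: (3, 3) camera intrinsics; target_depth: (H, W) reference depth map."""
    H, W = target_depth.shape
    # Transform points into the camera frame with the predicted extrinsics.
    pts_h = torch.cat([points, torch.ones(points.shape[0], 1, dtype=points.dtype)], dim=1)
    cam = (T_pred @ pts_h.T).T[:, :3]                     # (N, 3) camera-frame points
    cam = cam[cam[:, 2] > 1e-3]                           # keep points in front of the camera
    # Pinhole projection into pixel coordinates.
    uvw = (K @ cam.T).T
    u = (uvw[:, 0] / uvw[:, 2]).round().long()
    v = (uvw[:, 1] / uvw[:, 2]).round().long()
    valid = (u >= 0) & (W > u) & (v >= 0) & (H > v)       # inside the image bounds
    # Compare projected LiDAR depths with the reference depth at the same pixels.
    return torch.abs(cam[valid, 2] - target_depth[v[valid], u[valid]]).mean()
</code></pre>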
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1711.00028">arXiv:1711.00028</a> <span> [<a href="https://arxiv.org/pdf/1711.00028">pdf</a>, <a href="https://arxiv.org/format/1711.00028">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link" data-tooltip="Physics Education">physics.ed-ph</span> <span class="tag is-small is-grey" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> <span class="tag is-small is-grey" data-tooltip="Computers and Society">cs.CY</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a href="https://doi.org/10.1073/pnas.1717196115">10.1073/pnas.1717196115</a></span> </div> </div> </div> <p class="title is-5 mathjax"> Hack Weeks as a model for Data Science Education and Collaboration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huppenkothen%2C+D">Daniela Huppenkothen</a>, <a href="/search/cs?searchtype=author&query=Arendt%2C+A">Anthony Arendt</a>, <a href="/search/cs?searchtype=author&query=Hogg%2C+D+W">David W. Hogg</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Karthik Ram</a>, <a href="/search/cs?searchtype=author&query=VanderPlas%2C+J">Jake VanderPlas</a>, <a href="/search/cs?searchtype=author&query=Rokem%2C+A">Ariel Rokem</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Across almost all scientific disciplines, the instruments that record our experimental data and the methods required for storage and data analysis are rapidly increasing in complexity. This gives rise to the need for scientific communities to adapt on shorter time scales than traditional university curricula allow for, and therefore requires new modes of knowledge transfer. The universal applicability of data science tools to a broad range of problems has generated new opportunities to foster exchange of ideas and computational workflows across disciplines. In recent years, hack weeks have emerged as an effective tool for fostering these exchanges by providing training in modern data analysis workflows. While there are variations in hack week implementation, all events consist of a common core of three components: tutorials in state-of-the-art methodology, peer learning, and project work in a collaborative environment. In this paper, we present the concept of a hack week in the larger context of scientific meetings and point out similarities and differences to traditional conferences. We motivate the need for such an event and present in detail its strengths and challenges. We find that hack weeks are successful at cultivating collaboration and the exchange of knowledge. Participants self-report that these events help them both in their day-to-day research as well as in their careers. Based on our results, we conclude that hack weeks present an effective, easy-to-implement, fairly low-cost tool to positively impact data analysis literacy in academic disciplines, foster collaboration, and cultivate best practices. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2017.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 2 figures, submitted to PNAS, all relevant code available at https://github.com/uwescience/HackWeek-Writeup</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1707.04393">arXiv:1707.04393</a> <span> [<a href="https://arxiv.org/pdf/1707.04393">pdf</a>, <a href="https://arxiv.org/format/1707.04393">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Digital Libraries">cs.DL</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.7717/peerj-cs.142">10.7717/peerj-cs.142 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Sustainable computational science: the ReScience initiative </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Rougier%2C+N+P">Nicolas P. Rougier</a>, <a href="/search/cs?searchtype=author&query=Hinsen%2C+K">Konrad Hinsen</a>, <a href="/search/cs?searchtype=author&query=Alexandre%2C+F">Fr茅d茅ric Alexandre</a>, <a href="/search/cs?searchtype=author&query=Arildsen%2C+T">Thomas Arildsen</a>, <a href="/search/cs?searchtype=author&query=Barba%2C+L">Lorena Barba</a>, <a href="/search/cs?searchtype=author&query=Benureau%2C+F+C+Y">Fabien C. Y. Benureau</a>, <a href="/search/cs?searchtype=author&query=Brown%2C+C+T">C. Titus Brown</a>, <a href="/search/cs?searchtype=author&query=de+Buyl%2C+P">Pierre de Buyl</a>, <a href="/search/cs?searchtype=author&query=Caglayan%2C+O">Ozan Caglayan</a>, <a href="/search/cs?searchtype=author&query=Davison%2C+A+P">Andrew P. Davison</a>, <a href="/search/cs?searchtype=author&query=Delsuc%2C+M+A">Marc Andr茅 Delsuc</a>, <a href="/search/cs?searchtype=author&query=Detorakis%2C+G">Georgios Detorakis</a>, <a href="/search/cs?searchtype=author&query=Diem%2C+A+K">Alexandra K. Diem</a>, <a href="/search/cs?searchtype=author&query=Drix%2C+D">Damien Drix</a>, <a href="/search/cs?searchtype=author&query=Enel%2C+P">Pierre Enel</a>, <a href="/search/cs?searchtype=author&query=Girard%2C+B">Beno卯t Girard</a>, <a href="/search/cs?searchtype=author&query=Guest%2C+O">Olivia Guest</a>, <a href="/search/cs?searchtype=author&query=Hall%2C+M+G">Matt G. Hall</a>, <a href="/search/cs?searchtype=author&query=Henriques%2C+R+N">Rafael Neto Henriques</a>, <a href="/search/cs?searchtype=author&query=Hinaut%2C+X">Xavier Hinaut</a>, <a href="/search/cs?searchtype=author&query=Jaron%2C+K+S">Kamil S Jaron</a>, <a href="/search/cs?searchtype=author&query=Khamassi%2C+M">Mehdi Khamassi</a>, <a href="/search/cs?searchtype=author&query=Klein%2C+A">Almar Klein</a>, <a href="/search/cs?searchtype=author&query=Manninen%2C+T">Tiina Manninen</a>, <a href="/search/cs?searchtype=author&query=Marchesi%2C+P">Pietro Marchesi</a> , et al. 
(20 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Computer science offers a large set of tools for prototyping, writing, running, testing, validating, sharing and reproducing results; computational science, however, lags behind. In the best case, authors may provide their source code as a compressed archive and may feel confident their research is reproducible. But this is not exactly true. James Buckheit and David Donoho proposed more than two decades ago that an article about computational results is advertising, not scholarship. The actual scholarship is the full software environment, code, and data that produced the result. This implies new workflows, in particular in peer review. Existing journals have been slow to adapt: source code is rarely requested and hardly ever actually executed to check that it produces the results advertised in the article. ReScience is a peer-reviewed journal that targets computational research and encourages the explicit replication of already published research, promoting new and open-source implementations in order to ensure that the original research can be replicated from its description. To achieve this goal, the whole publishing chain is radically different from that of other traditional scientific journals. ReScience resides on GitHub, where each new implementation of a computational study is made available together with comments, explanations, and software tests. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 1 figure</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> PeerJ Computer Science 3:e142 (2017) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1707.02264">arXiv:1707.02264</a> <span> [<a href="https://arxiv.org/pdf/1707.02264">pdf</a>, <a href="https://arxiv.org/format/1707.02264">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Digital Libraries">cs.DL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.7717/peerj-cs.147">10.7717/peerj-cs.147 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Journal of Open Source Software (JOSS): design and first-year review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Smith%2C+A+M">Arfon M Smith</a>, <a href="/search/cs?searchtype=author&query=Niemeyer%2C+K+E">Kyle E Niemeyer</a>, <a href="/search/cs?searchtype=author&query=Katz%2C+D+S">Daniel S Katz</a>, <a href="/search/cs?searchtype=author&query=Barba%2C+L+A">Lorena A Barba</a>, <a href="/search/cs?searchtype=author&query=Githinji%2C+G">George Githinji</a>, <a href="/search/cs?searchtype=author&query=Gymrek%2C+M">Melissa Gymrek</a>, <a href="/search/cs?searchtype=author&query=Huff%2C+K+D">Kathryn D Huff</a>, <a href="/search/cs?searchtype=author&query=Madan%2C+C+R">Christopher R Madan</a>, <a href="/search/cs?searchtype=author&query=Mayes%2C+A+C">Abigail Cabunoc Mayes</a>, <a href="/search/cs?searchtype=author&query=Moerman%2C+K+M">Kevin M Moerman</a>, <a href="/search/cs?searchtype=author&query=Prins%2C+P">Pjotr Prins</a>, <a href="/search/cs?searchtype=author&query=Ram%2C+K">Karthik Ram</a>, <a href="/search/cs?searchtype=author&query=Rokem%2C+A">Ariel Rokem</a>, <a href="/search/cs?searchtype=author&query=Teal%2C+T+K">Tracy K Teal</a>, <a href="/search/cs?searchtype=author&query=Guimera%2C+R+V">Roman Valls Guimera</a>, <a href="/search/cs?searchtype=author&query=Vanderplas%2C+J+T">Jacob T Vanderplas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1707.02264v3-abstract-short" style="display: inline;"> This article describes the motivation, design, and progress of the Journal of Open Source Software (JOSS). JOSS is a free and open-access journal that publishes articles describing research software. It has the dual goals of improving the quality of the software submitted and providing a mechanism for research software developers to receive credit. 
While designed to work within the current merit system of science, JOSS addresses the dearth of rewards for key contributions to science made in the form of software. JOSS publishes articles that encapsulate scholarship contained in the software itself, and its rigorous peer review targets the software components: functionality, documentation, tests, continuous integration, and the license. A JOSS article contains an abstract describing the purpose and functionality of the software, references, and a link to the software archive. The article is the entry point of a JOSS submission, which encompasses the full set of software artifacts. Submission and review proceed in the open, on GitHub. Editors, reviewers, and authors work collaboratively and openly. Unlike other journals, JOSS does not reject articles requiring major revision; while not yet accepted, articles remain visible and under review until the authors make adequate changes (or withdraw, if unable to meet requirements). Once an article is accepted, JOSS gives it a DOI, deposits its metadata in Crossref, and the article can begin collecting citations on indexers like Google Scholar and other services. Authors retain copyright of their JOSS article, releasing it under a Creative Commons Attribution 4.0 International License. In its first year, starting in May 2016, JOSS published 111 articles, with more than 40 additional articles under review. JOSS is a sponsored project of the nonprofit organization NumFOCUS and is an affiliate of the Open Source Initiative. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 January, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 8 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> PeerJ Computer Science 4 (2018) e147 </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 