<!-- pre-doctype scrape artifact (invalid text before DOCTYPE), preserved as a comment: "CINXE.COM" / "Search | arXiv e-print repository" -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;37 of 37 results for author: <span class="mathjax">Duan, W</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Duan%2C+W">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Duan, W"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Duan%2C+W&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Duan, W"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.08471">arXiv:2410.08471</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.08471">pdf</a>, <a href="https://arxiv.org/format/2410.08471">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Formal Languages and Automata Theory">cs.FL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Opacity Enforcement by Edit Functions Under Incomparable Observations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+R">Ruotian Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Fanti%2C+M+P">Maria Pia Fanti</a>, <a href="/search/cs?searchtype=author&amp;query=Hadjicostis%2C+C+N">Christoforos N. 
Hadjicostis</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhiwu Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.08471v1-abstract-short" style="display: inline;"> As an information-flow privacy property, opacity characterizes whether a malicious external observer (referred to as an intruder) is able to infer the secret behavior of a system. This paper addresses the problem of opacity enforcement using edit functions in discrete event systems modeled by partially observed deterministic finite automata. A defender uses the edit function as an interface at the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.08471v1-abstract-full').style.display = 'inline'; document.getElementById('2410.08471v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.08471v1-abstract-full" style="display: none;"> As an information-flow privacy property, opacity characterizes whether a malicious external observer (referred to as an intruder) is able to infer the secret behavior of a system. This paper addresses the problem of opacity enforcement using edit functions in discrete event systems modeled by partially observed deterministic finite automata. A defender uses the edit function as an interface at the output of a system to manipulate actual observations through insertion, substitution, and deletion operations so that the intruder will be prevented from inferring the secret behavior of the system. 
Unlike existing work which usually assumes that the observation capabilities of the intruder and the defender are identical, we consider a more general setting where they may observe incomparable subsets of events generated by the system. To characterize whether the defender has the ability to enforce opacity of the system under this setting, the notion of \emph{$ic$-enforceability} is introduced. Then, the opacity enforcement problem is transformed to a two-player game, with imperfect information between the system and the defender, which can be used to determine a feasible decision-making strategy for the defender. Within the game scheme, an edit mechanism is constructed to enumerate all feasible edit actions following system behavior. We further show that an $ic$-enforcing edit function (if one exists) can be synthesized from the edit mechanism to enforce opacity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.08471v1-abstract-full').style.display = 'none'; document.getElementById('2410.08471v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.04482">arXiv:2409.04482</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.04482">pdf</a>, <a href="https://arxiv.org/format/2409.04482">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SCARF: Scalable Continual Learning Framework for Memory-efficient Multiple Neural Radiance Fields </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yuze Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Junyi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wantong Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+Y">Yongtang Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Qi%2C+Y">Yue Qi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.04482v1-abstract-short" style="display: inline;"> This paper introduces a novel continual learning framework for synthesising novel views of multiple scenes, learning multiple 3D scenes incrementally, and updating the network parameters only with the training data of the upcoming new scene. We build on Neural Radiance Fields (NeRF), which uses multi-layer perceptron to model the density and radiance field of a scene as the implicit function. 
Whil&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.04482v1-abstract-full').style.display = 'inline'; document.getElementById('2409.04482v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.04482v1-abstract-full" style="display: none;"> This paper introduces a novel continual learning framework for synthesising novel views of multiple scenes, learning multiple 3D scenes incrementally, and updating the network parameters only with the training data of the upcoming new scene. We build on Neural Radiance Fields (NeRF), which uses multi-layer perceptron to model the density and radiance field of a scene as the implicit function. While NeRF and its extensions have shown a powerful capability of rendering photo-realistic novel views in a single 3D scene, managing these growing 3D NeRF assets efficiently is a new scientific problem. Very few works focus on the efficient representation or continuous learning capability of multiple scenes, which is crucial for the practical applications of NeRF. To achieve these goals, our key idea is to represent multiple scenes as the linear combination of a cross-scene weight matrix and a set of scene-specific weight matrices generated from a global parameter generator. Furthermore, we propose an uncertain surface knowledge distillation strategy to transfer the radiance field knowledge of previous scenes to the new model. Representing multiple 3D scenes with such weight matrices significantly reduces memory requirements. At the same time, the uncertain surface distillation strategy greatly overcomes the catastrophic forgetting problem and maintains the photo-realistic rendering quality of previous scenes. 
Experiments show that the proposed approach achieves state-of-the-art rendering quality of continual learning NeRF on NeRF-Synthetic, LLFF, and TanksAndTemples datasets while preserving extra low storage cost. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.04482v1-abstract-full').style.display = 'none'; document.getElementById('2409.04482v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.11787">arXiv:2408.11787</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.11787">pdf</a>, <a href="https://arxiv.org/format/2408.11787">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> NuSegDG: Integration of Heterogeneous Space and Gaussian Kernel for Domain-Generalized Nuclei Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lou%2C+Z">Zhenye Lou</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Q">Qing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zekun Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xiangjian He</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+C">Chenxin Li</a>, <a 
href="/search/cs?searchtype=author&amp;query=He%2C+M+M">Maggie M. He</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenting Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.11787v2-abstract-short" style="display: inline;"> Domain-generalized nuclei segmentation refers to the generalizability of models to unseen domains based on knowledge learned from source domains and is challenged by various image conditions, cell types, and stain strategies. Recently, the Segment Anything Model (SAM) has made great success in universal image segmentation by interactive prompt modes (e.g., point and box). Despite its strengths, th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11787v2-abstract-full').style.display = 'inline'; document.getElementById('2408.11787v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.11787v2-abstract-full" style="display: none;"> Domain-generalized nuclei segmentation refers to the generalizability of models to unseen domains based on knowledge learned from source domains and is challenged by various image conditions, cell types, and stain strategies. Recently, the Segment Anything Model (SAM) has made great success in universal image segmentation by interactive prompt modes (e.g., point and box). Despite its strengths, the original SAM presents limited adaptation to medical images. Moreover, SAM requires providing manual bounding box prompts for each object to produce satisfactory segmentation masks, so it is laborious in nuclei segmentation scenarios. To address these limitations, we propose a domain-generalizable framework for nuclei image segmentation, abbreviated to NuSegDG. 
Specifically, we first devise a Heterogeneous Space Adapter (HS-Adapter) to learn multi-dimensional feature representations of different nuclei domains by injecting a small number of trainable parameters into the image encoder of SAM. To alleviate the labor-intensive requirement of manual prompts, we introduce a Gaussian-Kernel Prompt Encoder (GKP-Encoder) to generate density maps driven by a single point, which guides segmentation predictions by mixing position prompts and semantic prompts. Furthermore, we present a Two-Stage Mask Decoder (TSM-Decoder) to effectively convert semantic masks to instance maps without the manual demand for morphological shape refinement. Based on our experimental evaluations, the proposed NuSegDG demonstrates state-of-the-art performance in nuclei instance segmentation, exhibiting superior domain generalization capabilities. The source code is available at https://github.com/xq141839/NuSegDG. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11787v2-abstract-full').style.display = 'none'; document.getElementById('2408.11787v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under Review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.14153">arXiv:2407.14153</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.14153">pdf</a>, <a href="https://arxiv.org/format/2407.14153">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> ESP-MedSAM: Efficient Self-Prompting SAM for Universal Domain-Generalized Medical Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Q">Qing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+J">Jiaxuan Li</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xiangjian He</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Ziyu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenting Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+C">Chenxin Li</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+M+M">Maggie M. He</a>, <a href="/search/cs?searchtype=author&amp;query=Tesema%2C+F+B">Fiseha B. Tesema</a>, <a href="/search/cs?searchtype=author&amp;query=Cheah%2C+W+P">Wooi P. Cheah</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Qu%2C+R">Rong Qu</a>, <a href="/search/cs?searchtype=author&amp;query=Garibaldi%2C+J+M">Jonathan M. 
Garibaldi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.14153v4-abstract-short" style="display: inline;"> The universality of deep neural networks across different modalities and their generalization capabilities to unseen domains play an essential role in medical image segmentation. The recent Segment Anything Model (SAM) has demonstrated its potential in both settings. However, the huge computational costs, demand for manual annotations as prompts and conflict-prone decoding process of SAM degrade i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.14153v4-abstract-full').style.display = 'inline'; document.getElementById('2407.14153v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.14153v4-abstract-full" style="display: none;"> The universality of deep neural networks across different modalities and their generalization capabilities to unseen domains play an essential role in medical image segmentation. The recent Segment Anything Model (SAM) has demonstrated its potential in both settings. However, the huge computational costs, demand for manual annotations as prompts and conflict-prone decoding process of SAM degrade its generalizability and applicability in clinical scenarios. To address these issues, we propose an efficient self-prompting SAM for universal domain-generalized medical image segmentation, named ESP-MedSAM. Specifically, we first devise the Multi-Modal Decoupled Knowledge Distillation (MMDKD) strategy to construct a lightweight semi-parameter sharing image encoder that produces discriminative visual features for diverse modalities. Further, we introduce the Self-Patch Prompt Generator (SPPG) to automatically generate high-quality dense prompt embeddings for guiding segmentation decoding. 
Finally, we design the Query-Decoupled Modality Decoder (QDMD) that leverages a one-to-one strategy to provide an independent decoding channel for every modality. Extensive experiments indicate that ESP-MedSAM outperforms state-of-the-arts in diverse medical imaging segmentation tasks, displaying superior modality universality and generalization capabilities. Especially, ESP-MedSAM uses only 4.5\% parameters compared to SAM-H. The source code is available at https://github.com/xq141839/ESP-MedSAM. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.14153v4-abstract-full').style.display = 'none'; document.getElementById('2407.14153v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under Review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.08287">arXiv:2406.08287</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.08287">pdf</a>, <a href="https://arxiv.org/format/2406.08287">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Pre-Training Identification of Graph Winning Tickets in Adaptive Spatial-Temporal Graph Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenying Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+T">Tianxiang Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Rao%2C+H">Hong Rao</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xiaoxi He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.08287v2-abstract-short" style="display: inline;"> In this paper, we present a novel method to significantly enhance the computational efficiency of Adaptive Spatial-Temporal Graph Neural Networks (ASTGNNs) by introducing the concept of the Graph Winning Ticket (GWT), derived from the Lottery Ticket Hypothesis (LTH). 
By adopting a pre-determined star topology as a GWT prior to training, we balance edge reduction with efficient information propagat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08287v2-abstract-full').style.display = 'inline'; document.getElementById('2406.08287v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.08287v2-abstract-full" style="display: none;"> In this paper, we present a novel method to significantly enhance the computational efficiency of Adaptive Spatial-Temporal Graph Neural Networks (ASTGNNs) by introducing the concept of the Graph Winning Ticket (GWT), derived from the Lottery Ticket Hypothesis (LTH). By adopting a pre-determined star topology as a GWT prior to training, we balance edge reduction with efficient information propagation, reducing computational demands while maintaining high model performance. Both the time and memory computational complexity of generating adaptive spatial-temporal graphs is significantly reduced from $\mathcal{O}(N^2)$ to $\mathcal{O}(N)$. Our approach streamlines the ASTGNN deployment by eliminating the need for exhaustive training, pruning, and retraining cycles, and demonstrates empirically across various datasets that it is possible to achieve comparable performance to full models with substantially lower computational costs. Specifically, our approach enables training ASTGNNs on the largest scale spatial-temporal dataset using a single A6000 equipped with 48 GB of memory, overcoming the out-of-memory issue encountered during original training and even achieving state-of-the-art performance. Furthermore, we delve into the effectiveness of the GWT from the perspective of spectral graph theory, providing substantial theoretical support. 
This advancement not only proves the existence of efficient sub-networks within ASTGNNs but also broadens the applicability of the LTH in resource-constrained settings, marking a significant step forward in the field of graph neural networks. Code is available at https://anonymous.4open.science/r/paper-1430. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08287v2-abstract-full').style.display = 'none'; document.getElementById('2406.08287v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Conference paper, accepted by KDD&#39; 24</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.06949">arXiv:2406.06949</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.06949">pdf</a>, <a href="https://arxiv.org/format/2406.06949">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TGRS.2024.3452175">10.1109/TGRS.2024.3452175 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p 
class="title is-5 mathjax"> Triple-domain Feature Learning with Frequency-aware Memory Enhancement for Moving Infrared Small Target Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Weiwei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+L">Luping Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+S">Shengjia Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+S">Sicheng Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+M">Mao Ye</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.06949v2-abstract-short" style="display: inline;"> As a sub-field of object detection, moving infrared small target detection presents significant challenges due to tiny target sizes and low contrast against backgrounds. Currently-existing methods primarily rely on the features extracted only from spatio-temporal domain. Frequency domain has hardly been concerned yet, although it has been widely applied in image processing. To extend feature sourc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.06949v2-abstract-full').style.display = 'inline'; document.getElementById('2406.06949v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.06949v2-abstract-full" style="display: none;"> As a sub-field of object detection, moving infrared small target detection presents significant challenges due to tiny target sizes and low contrast against backgrounds. Currently-existing methods primarily rely on the features extracted only from spatio-temporal domain. Frequency domain has hardly been concerned yet, although it has been widely applied in image processing. 
To extend feature source domains and enhance feature representation, we propose a new Triple-domain Strategy (Tridos) with the frequency-aware memory enhancement on spatio-temporal domain for infrared small target detection. In this scheme, it effectively detaches and enhances frequency features by a local-global frequency-aware module with Fourier transform. Inspired by human visual system, our memory enhancement is designed to capture the spatial relations of infrared targets among video frames. Furthermore, it encodes temporal dynamics motion features via differential learning and residual enhancing. Additionally, we further design a residual compensation to reconcile possible cross-domain feature mismatches. To our best knowledge, proposed Tridos is the first work to explore infrared target feature learning comprehensively in spatio-temporal-frequency domains. The extensive experiments on three datasets (i.e., DAUB, ITSDT-15K and IRDST) validate that our triple-domain infrared feature learning scheme could often be obviously superior to state-of-the-art ones. Source codes are available at https://github.com/UESTC-nnLab/Tridos. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.06949v2-abstract-full').style.display = 'none'; document.getElementById('2406.06949v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper has accepted IEEE TGRS</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Transactions on Geoscience and Remote Sensing 2024 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.10976">arXiv:2404.10976</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.10976">pdf</a>, <a href="https://arxiv.org/format/2404.10976">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Group-Aware Coordination Graph for Multi-Agent Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jie Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Xuan%2C+J">Junyu Xuan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.10976v3-abstract-short" style="display: inline;"> Cooperative Multi-Agent Reinforcement Learning (MARL) necessitates seamless collaboration among agents, often represented by an underlying relation graph. Existing methods for learning this graph primarily focus on agent-pair relations, neglecting higher-order relationships. 
While several approaches attempt to extend cooperation modelling to encompass behaviour similarities within groups, they com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.10976v3-abstract-full').style.display = 'inline'; document.getElementById('2404.10976v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.10976v3-abstract-full" style="display: none;"> Cooperative Multi-Agent Reinforcement Learning (MARL) necessitates seamless collaboration among agents, often represented by an underlying relation graph. Existing methods for learning this graph primarily focus on agent-pair relations, neglecting higher-order relationships. While several approaches attempt to extend cooperation modelling to encompass behaviour similarities within groups, they commonly fall short in concurrently learning the latent graph, thereby constraining the information exchange among partially observed agents. To overcome these limitations, we present a novel approach to infer the Group-Aware Coordination Graph (GACG), which is designed to capture both the cooperation between agent pairs based on current observations and group-level dependencies from behaviour patterns observed across trajectories. This graph is further used in graph convolution for information exchange between agents during decision-making. To further ensure behavioural consistency among agents within the same group, we introduce a group distance loss, which promotes group cohesion and encourages specialization between groups. Our evaluations, conducted on StarCraft II micromanagement tasks, demonstrate GACG&#39;s superior performance. An ablation study further provides experimental evidence of the effectiveness of each component of our method. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.10976v3-abstract-full').style.display = 'none'; document.getElementById('2404.10976v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by IJCAI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.19253">arXiv:2403.19253</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.19253">pdf</a>, <a href="https://arxiv.org/format/2403.19253">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Inferring Latent Temporal Sparse Coordination Graph for Multi-Agent Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jie Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Xuan%2C+J">Junyu Xuan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.19253v1-abstract-short" style="display: inline;"> Effective agent coordination is crucial in cooperative Multi-Agent 
Reinforcement Learning (MARL). While agent cooperation can be represented by graph structures, prevailing graph learning methods in MARL are limited. They rely solely on one-step observations, neglecting crucial historical experiences, leading to deficient graphs that foster redundant or detrimental information exchanges. Additiona&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19253v1-abstract-full').style.display = 'inline'; document.getElementById('2403.19253v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.19253v1-abstract-full" style="display: none;"> Effective agent coordination is crucial in cooperative Multi-Agent Reinforcement Learning (MARL). While agent cooperation can be represented by graph structures, prevailing graph learning methods in MARL are limited. They rely solely on one-step observations, neglecting crucial historical experiences, leading to deficient graphs that foster redundant or detrimental information exchanges. Additionally, high computational demands for action-pair calculations in dense graphs impede scalability. To address these challenges, we propose inferring a Latent Temporal Sparse Coordination Graph (LTS-CG) for MARL. The LTS-CG leverages agents&#39; historical observations to calculate an agent-pair probability matrix, where a sparse graph is sampled from and used for knowledge exchange between agents, thereby simultaneously capturing agent dependencies and relation uncertainty. The computational complexity of this procedure is only related to the number of agents. This graph learning process is further augmented by two innovative characteristics: Predict-Future, which enables agents to foresee upcoming observations, and Infer-Present, ensuring a thorough grasp of the environmental context from limited data. 
These features allow LTS-CG to construct temporal graphs from historical and real-time information, promoting knowledge exchange during policy learning and effective collaboration. Graph learning and agent training occur simultaneously in an end-to-end manner. Our demonstrated results on the StarCraft II benchmark underscore LTS-CG&#39;s superior performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19253v1-abstract-full').style.display = 'none'; document.getElementById('2403.19253v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.11408">arXiv:2403.11408</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.11408">pdf</a>, <a href="https://arxiv.org/format/2403.11408">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Layer-diverse Negative Sampling for Graph Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jie Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y+G">Yu Guang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xuan%2C+J">Junyu Xuan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.11408v1-abstract-short" style="display: inline;"> Graph neural networks (GNNs) are a powerful 
solution for various structure learning applications due to their strong representation capabilities for graph data. However, traditional GNNs, relying on message-passing mechanisms that gather information exclusively from first-order neighbours (known as positive samples), can lead to issues such as over-smoothing and over-squashing. To mitigate these i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.11408v1-abstract-full').style.display = 'inline'; document.getElementById('2403.11408v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.11408v1-abstract-full" style="display: none;"> Graph neural networks (GNNs) are a powerful solution for various structure learning applications due to their strong representation capabilities for graph data. However, traditional GNNs, relying on message-passing mechanisms that gather information exclusively from first-order neighbours (known as positive samples), can lead to issues such as over-smoothing and over-squashing. To mitigate these issues, we propose a layer-diverse negative sampling method for message-passing propagation. This method employs a sampling matrix within a determinantal point process, which transforms the candidate set into a space and selectively samples from this space to generate negative samples. To further enhance the diversity of the negative samples during each forward pass, we develop a space-squeezing method to achieve layer-wise diversity in multi-layer GNNs. Experiments on various real-world graph datasets demonstrate the effectiveness of our approach in improving the diversity of negative samples and overall learning performance. Moreover, adding negative samples dynamically changes the graph&#39;s topology, thus with the strong potential to improve the expressiveness of GNNs and reduce the risk of over-squashing. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.11408v1-abstract-full').style.display = 'none'; document.getElementById('2403.11408v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in Transactions on Machine Learning Research (03/2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.05260">arXiv:2403.05260</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.05260">pdf</a>, <a href="https://arxiv.org/format/2403.05260">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Predicting Single-cell Drug Sensitivity by Adaptive Weighted Feature for Adversarial Multi-source Domain Adaptation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Hui Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.05260v1-abstract-short" style="display: inline;"> The development of single-cell sequencing technology had promoted the generation of a large amount of single-cell transcriptional profiles, providing valuable opportunities to explore drug-resistant cell subpopulations in a tumor. 
However, the drug sensitivity data in single-cell level is still scarce to date, pressing an urgent and highly challenging task for computational prediction of the drug&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.05260v1-abstract-full').style.display = 'inline'; document.getElementById('2403.05260v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.05260v1-abstract-full" style="display: none;"> The development of single-cell sequencing technology had promoted the generation of a large amount of single-cell transcriptional profiles, providing valuable opportunities to explore drug-resistant cell subpopulations in a tumor. However, the drug sensitivity data in single-cell level is still scarce to date, pressing an urgent and highly challenging task for computational prediction of the drug sensitivity to individual cells. This paper proposed scAdaDrug, a multi-source adaptive weighting model to predict single-cell drug sensitivity. We used an autoencoder to extract domain-invariant features related to drug sensitivity from multiple source domains by exploiting adversarial domain adaptation. Especially, we introduced an adaptive weight generator to produce importance-aware and mutual independent weights, which could adaptively modulate the embedding of each sample in dimension-level for both source and target domains. Extensive experimental results showed that our model achieved state-of-the-art performance in predicting drug sensitivity on single-cell datasets, as well as on cell line and patient datasets. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.05260v1-abstract-full').style.display = 'none'; document.getElementById('2403.05260v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.01697">arXiv:2403.01697</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.01697">pdf</a>, <a href="https://arxiv.org/format/2403.01697">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Dismantling Gender Blindness in Online Discussion of a Crime/Gender Dichotomy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Qin%2C+Y">Yigang Qin</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Weilun Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Q">Qunfang Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+Z">Zhicong Lu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.01697v1-abstract-short" style="display: inline;"> Contemporary feminists utilize social media for activism, while backlashes come along. The gender-related discourses are often diminished when addressing public events regarding sexism and gender inequality on social media platforms. 
The dichotomized debate around the Tangshan beating incident in China epitomized how criminal interpretations of gender-related violence became a backlash against fem&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.01697v1-abstract-full').style.display = 'inline'; document.getElementById('2403.01697v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.01697v1-abstract-full" style="display: none;"> Contemporary feminists utilize social media for activism, while backlashes come along. The gender-related discourses are often diminished when addressing public events regarding sexism and gender inequality on social media platforms. The dichotomized debate around the Tangshan beating incident in China epitomized how criminal interpretations of gender-related violence became a backlash against feminist expressions. By analyzing posts on Weibo using mixed methods, we describe the emerging discursive patterns around crime and gender, uncovering the inherent gender-blind sexism that refutes feminist discourses on the social platform. We also highlight the critical restrictions facing grassroots feminist activism in Chinese cyberspace and propose implications for the design and research related to digital feminist activism. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.01697v1-abstract-full').style.display = 'none'; document.getElementById('2403.01697v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">31 pages, 3 figures, Accepted for publication in Proceedings of the ACM on Human-Computer Interaction (CSCW 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.16366">arXiv:2402.16366</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.16366">pdf</a>, <a href="https://arxiv.org/format/2402.16366">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> SPC-NeRF: Spatial Predictive Compression for Voxel Based Radiance Field </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Song%2C+Z">Zetian Song</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenhong Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuhuai Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shiqi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+S">Siwei Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+W">Wen Gao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.16366v1-abstract-short" style="display: inline;"> Representing the Neural Radiance Field (NeRF) with the explicit voxel grid (EVG) is a promising direction for improving NeRFs. However, the EVG representation is not efficient for storage and transmission because of the terrific memory cost. 
Current methods for compressing EVG mainly inherit the methods designed for neural network compression, such as pruning and quantization, which do not take fu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.16366v1-abstract-full').style.display = 'inline'; document.getElementById('2402.16366v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.16366v1-abstract-full" style="display: none;"> Representing the Neural Radiance Field (NeRF) with the explicit voxel grid (EVG) is a promising direction for improving NeRFs. However, the EVG representation is not efficient for storage and transmission because of the terrific memory cost. Current methods for compressing EVG mainly inherit the methods designed for neural network compression, such as pruning and quantization, which do not take full advantage of the spatial correlation of voxels. Inspired by prosperous digital image compression techniques, this paper proposes SPC-NeRF, a novel framework applying spatial predictive coding in EVG compression. The proposed framework can remove spatial redundancy efficiently for better compression performance. Moreover, we model the bitrate and design a novel form of the loss function, where we can jointly optimize compression ratio and distortion to achieve higher coding efficiency. Extensive experiments demonstrate that our method can achieve 32% bit saving compared to the state-of-the-art method VQRF on multiple representative test datasets, with comparable training time. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.16366v1-abstract-full').style.display = 'none'; document.getElementById('2402.16366v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.12231">arXiv:2308.12231</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.12231">pdf</a>, <a href="https://arxiv.org/format/2308.12231">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SPPNet: A Single-Point Prompt Network for Nuclei Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Q">Qing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Kuang%2C+W">Wenwei Kuang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zeyu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+X">Xueyao Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Haoran Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenting Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.12231v1-abstract-short" style="display: inline;"> Image segmentation plays an essential role in nuclei image analysis. 
Recently, the segment anything model has made a significant breakthrough in such tasks. However, the current model has two major issues for cell segmentation: (1) the image encoder of the segment anything model involves a large number of parameters. Retraining or even fine-tuning the model still requires expensive computationa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.12231v1-abstract-full').style.display = 'inline'; document.getElementById('2308.12231v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.12231v1-abstract-full" style="display: none;"> Image segmentation plays an essential role in nuclei image analysis. Recently, the segment anything model has made a significant breakthrough in such tasks. However, the current model has two major issues for cell segmentation: (1) the image encoder of the segment anything model involves a large number of parameters. Retraining or even fine-tuning the model still requires expensive computational resources. (2) in point prompt mode, points are sampled from the center of the ground truth and more than one set of points is expected to achieve reliable performance, which is not efficient for practical applications. In this paper, a single-point prompt network is proposed for nuclei image segmentation, called SPPNet. We replace the original image encoder with a lightweight vision transformer. Also, an effective convolutional block is added in parallel to extract the low-level semantic information from the image and compensate for the performance degradation due to the small image encoder. We propose a new point-sampling method based on the Gaussian kernel. The proposed model is evaluated on the MoNuSeg-2018 dataset. The result demonstrated that SPPNet outperforms existing U-shape architectures and shows faster convergence in training. 
Compared to the segment anything model, SPPNet shows roughly 20 times faster inference, with 1/70 parameters and computational cost. Particularly, only one set of points is required in both the training and inference phases, which is more reasonable for clinical applications. The code for our work and more technical details can be found at https://github.com/xq141839/SPPNet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.12231v1-abstract-full').style.display = 'none'; document.getElementById('2308.12231v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.10276">arXiv:2308.10276</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.10276">pdf</a>, <a href="https://arxiv.org/format/2308.10276">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Minimalist Traffic Prediction: Linear Layer Is All You Need </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenying Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Rao%2C+H">Hong Rao</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+W">Wei Huang</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xiaoxi He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.10276v2-abstract-short" style="display: inline;"> Traffic prediction is 
essential for the progression of Intelligent Transportation Systems (ITS) and the vision of smart cities. While Spatial-Temporal Graph Neural Networks (STGNNs) have shown promise in this domain by leveraging Graph Neural Networks (GNNs) integrated with either RNNs or Transformers, they present challenges such as computational complexity, gradient issues, and resource-intensiv&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.10276v2-abstract-full').style.display = 'inline'; document.getElementById('2308.10276v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.10276v2-abstract-full" style="display: none;"> Traffic prediction is essential for the progression of Intelligent Transportation Systems (ITS) and the vision of smart cities. While Spatial-Temporal Graph Neural Networks (STGNNs) have shown promise in this domain by leveraging Graph Neural Networks (GNNs) integrated with either RNNs or Transformers, they present challenges such as computational complexity, gradient issues, and resource-intensiveness. This paper addresses these challenges, advocating for three main solutions: a node-embedding approach, time series decomposition, and periodicity learning. We introduce STLinear, a minimalist model architecture designed for optimized efficiency and performance. Unlike traditional STGNNs, STlinear operates fully locally, avoiding inter-node data exchanges, and relies exclusively on linear layers, drastically cutting computational demands. Our empirical studies on real-world datasets confirm STLinear&#39;s prowess, matching or exceeding the accuracy of leading STGNNs, but with significantly reduced complexity and computation overhead (more than 95% reduction in MACs per epoch compared to state-of-the-art STGNN baseline published in 2023). 
In summary, STLinear emerges as a potent, efficient alternative to conventional STGNNs, with profound implications for the future of ITS and smart city initiatives. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.10276v2-abstract-full').style.display = 'none'; document.getElementById('2308.10276v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.05945">arXiv:2308.05945</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.05945">pdf</a>, <a href="https://arxiv.org/format/2308.05945">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Methodology">stat.ME</span> </div> </div> <p class="title is-5 mathjax"> Improving Ego-Cluster for Network Effect Measurement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Su%2C+W">Wentao Su</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Weitao Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.05945v3-abstract-short" style="display: inline;"> The network 
effect, wherein one user&#39;s activity impacts another user, is common in social network platforms. Many new features in social networks are specifically designed to create a network effect, enhancing user engagement. For instance, content creators tend to produce more when their articles and posts receive positive feedback from followers. This paper discusses a new cluster-level experime&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.05945v3-abstract-full').style.display = 'inline'; document.getElementById('2308.05945v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.05945v3-abstract-full" style="display: none;"> The network effect, wherein one user&#39;s activity impacts another user, is common in social network platforms. Many new features in social networks are specifically designed to create a network effect, enhancing user engagement. For instance, content creators tend to produce more when their articles and posts receive positive feedback from followers. This paper discusses a new cluster-level experimentation methodology for measuring creator-side metrics in the context of A/B experiments. The methodology is designed to address cases where the experiment randomization unit and the metric measurement unit differ. It is a crucial part of LinkedIn&#39;s overall strategy to foster a robust creator community and ecosystem. The method is developed based on widely-cited research at LinkedIn but significantly improves the efficiency and flexibility of the clustering algorithm. This improvement results in a stronger capability for measuring creator-side metrics and an increased velocity for creator-related experiments. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.05945v3-abstract-full').style.display = 'none'; document.getElementById('2308.05945v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.03311">arXiv:2308.03311</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.03311">pdf</a>, <a href="https://arxiv.org/format/2308.03311">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3586183.3606773">10.1145/3586183.3606773 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> CrossTalk: Intelligent Substrates for Language-Oriented Interaction in Video-Based Communication and Collaboration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Xia%2C+H">Haijun Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+T">Tony Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Gunturu%2C+A">Aditya Gunturu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+P">Peiling Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">William Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+X">Xiaoshuo Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.03311v1-abstract-short" style="display: inline;"> Despite the advances and ubiquity of digital communication media such as videoconferencing and virtual reality, they remain oblivious to the rich intentions expressed by users. Beyond transmitting audio, videos, and messages, we envision digital communication media as proactive facilitators that can provide unobtrusive assistance to enhance communication and collaboration. Informed by the results&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.03311v1-abstract-full').style.display = 'inline'; document.getElementById('2308.03311v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.03311v1-abstract-full" style="display: none;"> Despite the advances and ubiquity of digital communication media such as videoconferencing and virtual reality, they remain oblivious to the rich intentions expressed by users. Beyond transmitting audio, videos, and messages, we envision digital communication media as proactive facilitators that can provide unobtrusive assistance to enhance communication and collaboration. 
Informed by the results of a formative study, we propose three key design concepts to explore the systematic integration of intelligence into communication and collaboration, including the panel substrate, language-based intent recognition, and lightweight interaction techniques. We developed CrossTalk, a videoconferencing system that instantiates these concepts, which was found to enable a more fluid and flexible communication and collaboration experience. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.03311v1-abstract-full').style.display = 'none'; document.getElementById('2308.03311v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.13813">arXiv:2306.13813</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.13813">pdf</a>, <a href="https://arxiv.org/format/2306.13813">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DualAttNet: Synergistic Fusion of Image-level and Fine-Grained Disease Attention for Multi-Label Lesion Detection in Chest X-rays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Q">Qing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenting Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.13813v1-abstract-short" style="display: inline;"> Chest radiographs are the most commonly performed radiological examinations for lesion detection. Recent advances in deep learning have led to encouraging results in various thoracic disease detection tasks. Particularly, the architecture with feature pyramid network performs the ability to recognise targets with different sizes. However, such networks are difficult to focus on lesion regions in c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.13813v1-abstract-full').style.display = 'inline'; document.getElementById('2306.13813v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.13813v1-abstract-full" style="display: none;"> Chest radiographs are the most commonly performed radiological examinations for lesion detection. Recent advances in deep learning have led to encouraging results in various thoracic disease detection tasks. Particularly, the architecture with feature pyramid network performs the ability to recognise targets with different sizes. However, such networks are difficult to focus on lesion regions in chest X-rays due to their high resemblance in vision. In this paper, we propose a dual attention supervised module for multi-label lesion detection in chest radiographs, named DualAttNet. It efficiently fuses global and local lesion classification information based on an image-level attention block and a fine-grained disease attention algorithm. A binary cross entropy loss function is used to calculate the difference between the attention map and ground truth at image level. The generated gradient flow is leveraged to refine pyramid representations and highlight lesion-related features. We evaluate the proposed model on VinDr-CXR, ChestX-ray8 and COVID-19 datasets. 
The experimental results show that DualAttNet surpasses baselines by 0.6% to 2.7% mAP and 1.4% to 4.7% AP50 with different detection architectures. The code for our work and more technical details can be found at https://github.com/xq141839/DualAttNet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.13813v1-abstract-full').style.display = 'none'; document.getElementById('2306.13813v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.06930">arXiv:2306.06930</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.06930">pdf</a>, <a href="https://arxiv.org/format/2306.06930">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Localised Adaptive Spatial-Temporal Graph Neural Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenying Duan</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xiaoxi He</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Z">Zimu Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Thiele%2C+L">Lothar Thiele</a>, <a href="/search/cs?searchtype=author&amp;query=Rao%2C+H">Hong Rao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.06930v2-abstract-short" style="display: inline;"> Spatial-temporal graph models are prevailing for abstracting and modelling 
spatial and temporal dependencies. In this work, we ask the following question: whether and to what extent can we localise spatial-temporal graph models? We limit our scope to adaptive spatial-temporal graph neural networks (ASTGNNs), the state-of-the-art model architecture. Our approach to localisation involves sparsifying&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.06930v2-abstract-full').style.display = 'inline'; document.getElementById('2306.06930v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.06930v2-abstract-full" style="display: none;"> Spatial-temporal graph models are prevailing for abstracting and modelling spatial and temporal dependencies. In this work, we ask the following question: whether and to what extent can we localise spatial-temporal graph models? We limit our scope to adaptive spatial-temporal graph neural networks (ASTGNNs), the state-of-the-art model architecture. Our approach to localisation involves sparsifying the spatial graph adjacency matrices. To this end, we propose Adaptive Graph Sparsification (AGS), a graph sparsification algorithm which successfully enables the localisation of ASTGNNs to an extreme extent (fully localisation). We apply AGS to two distinct ASTGNN architectures and nine spatial-temporal datasets. Intriguingly, we observe that spatial graphs in ASTGNNs can be sparsified by over 99.5\% without any decline in test accuracy. Furthermore, even when ASTGNNs are fully localised, becoming graph-less and purely temporal, we record no drop in accuracy for the majority of tested datasets, with only minor accuracy deterioration observed in the remaining datasets. However, when the partially or fully localised ASTGNNs are reinitialised and retrained on the same data, there is a considerable and consistent drop in accuracy. 
Based on these observations, we reckon that \textit{(i)} in the tested data, the information provided by the spatial dependencies is primarily included in the information provided by the temporal dependencies and, thus, can be essentially ignored for inference; and \textit{(ii)} although the spatial dependencies provide redundant information, it is vital for the effective training of ASTGNNs and thus cannot be ignored during training. Furthermore, the localisation of ASTGNNs holds the potential to reduce the heavy computation overhead required on large-scale spatial-temporal data and further enable the distributed deployment of ASTGNNs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.06930v2-abstract-full').style.display = 'none'; document.getElementById('2306.06930v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper was accepted by KDD 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.02055">arXiv:2212.02055</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2212.02055">pdf</a>, <a href="https://arxiv.org/format/2212.02055">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Graph Convolutional Neural Networks with Diverse Negative Samples via Decomposed Determinant Point Processes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Xuan%2C+J">Junyu Xuan</a>, <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+M">Maoying Qiao</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jie Lu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.02055v3-abstract-short" style="display: inline;"> Graph convolutional networks (GCNs) have achieved great success in graph representation learning by extracting high-level features from nodes and their topology. Since GCNs generally follow a message-passing mechanism, each node aggregates information from its first-order neighbour to update its representation. 
As a result, the representations of nodes with edges between them should be positively&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.02055v3-abstract-full').style.display = 'inline'; document.getElementById('2212.02055v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.02055v3-abstract-full" style="display: none;"> Graph convolutional networks (GCNs) have achieved great success in graph representation learning by extracting high-level features from nodes and their topology. Since GCNs generally follow a message-passing mechanism, each node aggregates information from its first-order neighbour to update its representation. As a result, the representations of nodes with edges between them should be positively correlated and thus can be considered positive samples. However, there are more non-neighbour nodes in the whole graph, which provide diverse and useful information for the representation update. Two non-adjacent nodes usually have different representations, which can be seen as negative samples. Besides the node representations, the structural information of the graph is also crucial for learning. In this paper, we used quality-diversity decomposition in determinant point processes (DPP) to obtain diverse negative samples. When defining a distribution on diverse subsets of all non-neighbouring nodes, we incorporate both graph structure information and node representations. Since the DPP sampling process requires matrix eigenvalue decomposition, we propose a new shortest-path-base method to improve computational efficiency. Finally, we incorporate the obtained negative samples into the graph convolution operation. The ideas are evaluated empirically in experiments on node classification tasks. 
These experiments show that the newly proposed methods not only improve the overall performance of standard representation learning but also significantly alleviate over-smoothing problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.02055v3-abstract-full').style.display = 'none'; document.getElementById('2212.02055v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by IEEE TNNLS on 30-Aug-2023. arXiv admin note: text overlap with arXiv:2210.00728</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.00728">arXiv:2210.00728</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.00728">pdf</a>, <a href="https://arxiv.org/format/2210.00728">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1609/aaai.v36i6.20608">10.1609/aaai.v36i6.20608 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Learning from the Dark: Boosting Graph Convolutional Neural Networks with Diverse Negative Samples </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Xuan%2C+J">Junyu Xuan</a>, <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+M">Maoying Qiao</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jie Lu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.00728v1-abstract-short" style="display: inline;"> Graph Convolutional Neural Networks (GCNs) have been generally accepted to be an effective tool for node representations learning. An interesting way to understand GCNs is to think of them as a message passing mechanism where each node updates its representation by accepting information from its neighbours (also known as positive samples). However, beyond these neighbouring nodes, graphs have a lar&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.00728v1-abstract-full').style.display = 'inline'; document.getElementById('2210.00728v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.00728v1-abstract-full" style="display: none;"> Graph Convolutional Neural Networks (GCNs) have been generally accepted to be an effective tool for node representations learning. An interesting way to understand GCNs is to think of them as a message passing mechanism where each node updates its representation by accepting information from its neighbours (also known as positive samples). However, beyond these neighbouring nodes, graphs have a large, dark, all-but forgotten world in which we find the non-neighbouring nodes (negative samples). In this paper, we show that this great dark world holds a substantial amount of information that might be useful for representation learning. 
Most specifically, it can provide negative information about the node representations. Our overall idea is to select appropriate negative samples for each node and incorporate the negative information contained in these samples into the representation updates. Moreover, we show that the process of selecting the negative samples is not trivial. Our theme therefore begins by describing the criteria for a good negative sample, followed by a determinantal point process algorithm for efficiently obtaining such samples. A GCN, boosted by diverse negative samples, then jointly considers the positive and negative information when passing messages. Experimental evaluations show that this idea not only improves the overall performance of standard representation learning but also significantly alleviates over-smoothing problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.00728v1-abstract-full').style.display = 'none'; document.getElementById('2210.00728v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.15027">arXiv:2206.15027</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.15027">pdf</a>, <a href="https://arxiv.org/format/2206.15027">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Interpretable Melody Generation from Lyrics with Discrete-Valued Adversarial Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhe Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+Y">Yi Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Oyama%2C+K">Keizo Oyama</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.15027v1-abstract-short" style="display: inline;"> Generating melody from lyrics is an interesting yet challenging task in the area of artificial intelligence and music. However, the difficulty of keeping the consistency between input lyrics and generated melody limits the generation quality of previous works. 
In our proposal, we demonstrate our proposed interpretable lyrics-to-melody generation system which can interact with users to understand t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.15027v1-abstract-full').style.display = 'inline'; document.getElementById('2206.15027v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.15027v1-abstract-full" style="display: none;"> Generating melody from lyrics is an interesting yet challenging task in the area of artificial intelligence and music. However, the difficulty of keeping the consistency between input lyrics and generated melody limits the generation quality of previous works. In our proposal, we demonstrate our proposed interpretable lyrics-to-melody generation system which can interact with users to understand the generation process and recreate the desired songs. To improve the reliability of melody generation that matches lyrics, mutual information is exploited to strengthen the consistency between lyrics and generated melodies. Gumbel-Softmax is exploited to solve the non-differentiability problem of generating discrete music attributes by Generative Adversarial Networks (GANs). Moreover, the predicted probabilities output by the generator are utilized to recommend music attributes. Interacting with our lyrics-to-melody generation system, users can listen to the generated AI song as well as recreate a new song by selecting from recommended music attributes. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.15027v1-abstract-full').style.display = 'none'; document.getElementById('2206.15027v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">3 pages, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.03170">arXiv:2205.03170</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.03170">pdf</a>, <a href="https://arxiv.org/format/2205.03170">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Event Concealment and Concealability Enforcement in Discrete Event Systems Under Partial Observation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Hadjicostis%2C+C+N">Christoforos N. 
Hadjicostis</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhiwu Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.03170v2-abstract-short" style="display: inline;"> Inspired by privacy problems where the behavior of a system should not be revealed to an external curious observer, we investigate event concealment and concealability enforcement in discrete event systems modeled as non-deterministic finite automata under partial observation. Given a subset of secret events in a given system, concealability holds if the occurrence of all secret events remains hid&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.03170v2-abstract-full').style.display = 'inline'; document.getElementById('2205.03170v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.03170v2-abstract-full" style="display: none;"> Inspired by privacy problems where the behavior of a system should not be revealed to an external curious observer, we investigate event concealment and concealability enforcement in discrete event systems modeled as non-deterministic finite automata under partial observation. Given a subset of secret events in a given system, concealability holds if the occurrence of all secret events remains hidden to a curious observer (an eavesdropper). A secret event is said to be (at least under some executions) unconcealable (inferable) if its occurrence can be indirectly determined with certainty after a finite number of observations. When concealability of a system does not hold (i.e., one or more secret events are unconcealable), we analyze how a defender, placed at the interface of the system with the eavesdropper, can be used to enforce concealability. 
The defender takes as input each observed event of the system and outputs a carefully modified event sequence (seen by the eavesdropper) using event deletion, insertion, or replacement. The defender is said to be C-enforceable if, following the occurrence of the secret events and regardless of subsequent activity generated by the system, it can always deploy a strategy to manipulate observations and conceal the events perpetually. We discuss systematic procedures to detect the presence of unconcealable secret events and verify C-Enforceability using techniques from state estimation and event diagnosis. We also propose a polynomial complexity construction for obtaining one necessary and one sufficient condition for C-Enforceability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.03170v2-abstract-full').style.display = 'none'; document.getElementById('2205.03170v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.15829">arXiv:2203.15829</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.15829">pdf</a>, <a href="https://arxiv.org/format/2203.15829">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/FG47880.2020.00050">10.1109/FG47880.2020.00050 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> An EEG-Based Multi-Modal Emotion Database with Both Posed and Authentic Facial Actions for Emotion Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xiaotian Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xiang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+H">Huiyuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenna Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+W">Weiying Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+L">Lijun Yin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.15829v1-abstract-short" style="display: inline;"> Emotion is an experience associated with a particular pattern of physiological activity along with different physiological, behavioral and cognitive changes. One behavioral change is facial expression, which has been studied extensively over the past few decades. 
Facial behavior varies with a person&#39;s emotion according to differences in terms of culture, personality, age, context, and environment.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.15829v1-abstract-full').style.display = 'inline'; document.getElementById('2203.15829v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.15829v1-abstract-full" style="display: none;"> Emotion is an experience associated with a particular pattern of physiological activity along with different physiological, behavioral and cognitive changes. One behavioral change is facial expression, which has been studied extensively over the past few decades. Facial behavior varies with a person&#39;s emotion according to differences in terms of culture, personality, age, context, and environment. In recent years, physiological activities have been used to study emotional responses. A typical signal is the electroencephalogram (EEG), which measures brain activity. Most of existing EEG-based emotion analysis has overlooked the role of facial expression changes. There exists little research on the relationship between facial behavior and brain signals due to the lack of dataset measuring both EEG and facial action signals simultaneously. To address this problem, we propose to develop a new database by collecting facial expressions, action units, and EEGs simultaneously. We recorded the EEGs and face videos of both posed facial actions and spontaneous expressions from 29 participants with different ages, genders, ethnic backgrounds. Differing from existing approaches, we designed a protocol to capture the EEG signals by evoking participants&#39; individual action units explicitly. We also investigated the relation between the EEG signals and facial action units. 
As a baseline, the database has been evaluated through the experiments on both posed and spontaneous emotion recognition with images alone, EEG alone, and EEG fused with images, respectively. The database will be released to the research community to advance the state of the art for automatic emotion recognition. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.15829v1-abstract-full').style.display = 'none'; document.getElementById('2203.15829v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> FG2021(long Oral) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.10808">arXiv:2202.10808</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.10808">pdf</a>, <a href="https://arxiv.org/format/2202.10808">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Combating Distribution Shift for Accurate Time Series Forecasting via Hypernetworks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenying Duan</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xiaoxi He</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Lu Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Thiele%2C+L">Lothar Thiele</a>, <a 
href="/search/cs?searchtype=author&amp;query=Rao%2C+H">Hong Rao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.10808v2-abstract-short" style="display: inline;"> Time series forecasting has widespread applications in urban life ranging from air quality monitoring to traffic analysis. However, accurate time series forecasting is challenging because real-world time series suffer from the distribution shift problem, where their statistical properties change over time. Despite extensive solutions to distribution shifts in domain adaptation or generalization, t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.10808v2-abstract-full').style.display = 'inline'; document.getElementById('2202.10808v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.10808v2-abstract-full" style="display: none;"> Time series forecasting has widespread applications in urban life ranging from air quality monitoring to traffic analysis. However, accurate time series forecasting is challenging because real-world time series suffer from the distribution shift problem, where their statistical properties change over time. Despite extensive solutions to distribution shifts in domain adaptation or generalization, they fail to function effectively in unknown, constantly-changing distribution shifts, which are common in time series. In this paper, we propose Hyper Time- Series Forecasting (HTSF), a hypernetwork-based framework for accurate time series forecasting under distribution shift. HTSF jointly learns the time-varying distributions and the corresponding forecasting models in an end-to-end fashion. 
Specifically, HTSF exploits the hyper layers to learn the best characterization of the distribution shifts, generating the model parameters for the main layers to make accurate predictions. We implement HTSF as an extensible framework that can incorporate diverse time series forecasting models such as RNNs and Transformers. Extensive experiments on 9 benchmarks demonstrate that HTSF achieves state-of-the-art performances. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.10808v2-abstract-full').style.display = 'none'; document.getElementById('2202.10808v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.00972">arXiv:2202.00972</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.00972">pdf</a>, <a href="https://arxiv.org/ps/2202.00972">ps</a>, <a href="https://arxiv.org/format/2202.00972">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine 
Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> DCSAU-Net: A Deeper and More Compact Split-Attention U-Net for Medical Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Q">Qing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Z">Zhicheng Ma</a>, <a href="/search/cs?searchtype=author&amp;query=HE%2C+N">Na HE</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenting Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.00972v2-abstract-short" style="display: inline;"> Deep learning architecture with convolutional neural network (CNN) achieves outstanding success in the field of computer vision. Where U-Net, an encoder-decoder architecture structured by CNN, makes a great breakthrough in biomedical image segmentation and has been applied in a wide range of practical scenarios. However, the equal design of every downsampling layer in the encoder part and simply s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.00972v2-abstract-full').style.display = 'inline'; document.getElementById('2202.00972v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.00972v2-abstract-full" style="display: none;"> Deep learning architecture with convolutional neural network (CNN) achieves outstanding success in the field of computer vision. Where U-Net, an encoder-decoder architecture structured by CNN, makes a great breakthrough in biomedical image segmentation and has been applied in a wide range of practical scenarios. 
However, the equal design of every downsampling layer in the encoder part and simply stacked convolutions do not allow U-Net to extract sufficient information of features from different depths. The increasing complexity of medical images brings new challenges to the existing methods. In this paper, we propose a deeper and more compact split-attention u-shape network (DCSAU-Net), which efficiently utilises low-level and high-level semantic information based on two novel frameworks: primary feature conservation and compact split-attention block. We evaluate the proposed model on CVC-ClinicDB, 2018 Data Science Bowl, ISIC-2018 and SegPC-2021 datasets. As a result, DCSAU-Net displays better performance than other state-of-the-art (SOTA) methods in terms of the mean Intersection over Union (mIoU) and F1-score. More significantly, the proposed model demonstrates excellent segmentation performance on challenging images. The code for our work and more technical details can be found at https://github.com/xq141839/DCSAU-Net. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.00972v2-abstract-full').style.display = 'none'; document.getElementById('2202.00972v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.05794">arXiv:2112.05794</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.05794">pdf</a>, <a href="https://arxiv.org/format/2112.05794">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Label Correction Algorithm Using Prior Information for Automatic and Accurate Geospatial Object Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Weiwei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Chiang%2C+Y">Yao-Yi Chiang</a>, <a href="/search/cs?searchtype=author&amp;query=Leyk%2C+S">Stefan Leyk</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+J+H">Johannes H. Uhl</a>, <a href="/search/cs?searchtype=author&amp;query=Knoblock%2C+C+A">Craig A. Knoblock</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.05794v1-abstract-short" style="display: inline;"> Thousands of scanned historical topographic maps contain valuable information covering long periods of time, such as how the hydrography of a region has changed over time. Efficiently unlocking the information in these maps requires training a geospatial objects recognition system, which needs a large amount of annotated data. 
Overlapping geo-referenced external vector data with topographic maps a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05794v1-abstract-full').style.display = 'inline'; document.getElementById('2112.05794v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.05794v1-abstract-full" style="display: none;"> Thousands of scanned historical topographic maps contain valuable information covering long periods of time, such as how the hydrography of a region has changed over time. Efficiently unlocking the information in these maps requires training a geospatial objects recognition system, which needs a large amount of annotated data. Overlapping geo-referenced external vector data with topographic maps according to their coordinates can annotate the desired objects&#39; locations in the maps automatically. However, directly overlapping the two datasets causes misaligned and false annotations because the publication years and coordinate projection systems of topographic maps are different from the external vector data. We propose a label correction algorithm, which leverages the color information of maps and the prior shape information of the external vector data to reduce misaligned and false annotations. The experiments show that the precision of annotations from the proposed algorithm is 10% higher than the annotations from a state-of-the-art algorithm. Consequently, recognition results using the proposed algorithm&#39;s annotations achieve 9% higher correctness than using the annotations from the state-of-the-art algorithm. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05794v1-abstract-full').style.display = 'none'; document.getElementById('2112.05794v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.05786">arXiv:2112.05786</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.05786">pdf</a>, <a href="https://arxiv.org/format/2112.05786">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Guided Generative Models using Weak Supervision for Detecting Object Spatial Arrangement in Overhead Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Weiwei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Chiang%2C+Y">Yao-Yi Chiang</a>, <a href="/search/cs?searchtype=author&amp;query=Leyk%2C+S">Stefan Leyk</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+J+H">Johannes H. Uhl</a>, <a href="/search/cs?searchtype=author&amp;query=Knoblock%2C+C+A">Craig A. 
Knoblock</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.05786v1-abstract-short" style="display: inline;"> The increasing availability and accessibility of numerous overhead images allows us to estimate and assess the spatial arrangement of groups of geospatial target objects, which can benefit many applications, such as traffic monitoring and agricultural monitoring. Spatial arrangement estimation is the process of identifying the areas which contain the desired objects in overhead images. Traditional&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05786v1-abstract-full').style.display = 'inline'; document.getElementById('2112.05786v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.05786v1-abstract-full" style="display: none;"> The increasing availability and accessibility of numerous overhead images allows us to estimate and assess the spatial arrangement of groups of geospatial target objects, which can benefit many applications, such as traffic monitoring and agricultural monitoring. Spatial arrangement estimation is the process of identifying the areas which contain the desired objects in overhead images. Traditional supervised object detection approaches can estimate accurate spatial arrangement but require large amounts of bounding box annotations. Recent semi-supervised clustering approaches can reduce manual labeling but still require annotations for all object categories in the image. This paper presents the target-guided generative model (TGGM), under the Variational Auto-encoder (VAE) framework, which uses Gaussian Mixture Models (GMM) to estimate the distributions of both hidden and decoder variables in VAE. 
Modeling both hidden and decoder variables by GMM reduces the required manual annotations significantly for spatial arrangement estimation. Unlike existing approaches that the training process can only update the GMM as a whole in the optimization iterations (e.g., a &#34;minibatch&#34;), TGGM allows the update of individual GMM components separately in the same optimization iteration. Optimizing GMM components separately allows TGGM to exploit the semantic relationships in spatial data and requires only a few labels to initiate and guide the generative process. Our experiments show that TGGM achieves results comparable to the state-of-the-art semi-supervised methods and outperforms unsupervised methods by 10% based on the $F_{1}$ scores, while requiring significantly fewer labeled data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05786v1-abstract-full').style.display = 'none'; document.getElementById('2112.05786v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.14941">arXiv:2110.14941</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.14941">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> A Novel Sample-efficient Deep Reinforcement Learning with Episodic Policy Transfer for PID-Based Control in Cardiac Catheterization Robots </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Omisore%2C+O+M">Olatunji Mumini Omisore</a>, <a href="/search/cs?searchtype=author&amp;query=Akinyemi%2C+T">Toluwanimi Akinyemi</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenke Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+W">Wenjing Du</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Lei Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.14941v1-abstract-short" style="display: inline;"> Robotic catheterization is typically used for percutaneous coronary intervention procedures nowadays and it involves steering flexible endovascular tools to open up occlusion in the coronaries. 
In this study, a sample-efficient deep reinforcement learning with episodic policy transfer is, for the first time, used for motion control during robotic catheterization with fully adaptive PID tuning stra&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.14941v1-abstract-full').style.display = 'inline'; document.getElementById('2110.14941v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.14941v1-abstract-full" style="display: none;"> Robotic catheterization is typically used for percutaneous coronary intervention procedures nowadays and it involves steering flexible endovascular tools to open up occlusion in the coronaries. In this study, a sample-efficient deep reinforcement learning with episodic policy transfer is, for the first time, used for motion control during robotic catheterization with fully adaptive PID tuning strategy. The reinforcement model aids the agent to continuously learn from its interactions in its environment and adaptively tune PID control gains for axial navigation of endovascular tool. The model was validated for axial motion control of a robotic system designed for intravascular catheterization. Simulation and experimental trials were done to validate the application of the model, and results obtained shows it could self-tune PID gains appropriately for motion control of a robotic catheter system. Performance comparison with conventional methods in average of 10 trials shows the agent tunes the gain better with error of 0.003 mm. Thus, the proposed model would offer more stable set-point motion control robotic catheterization. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.14941v1-abstract-full').style.display = 'none'; document.getElementById('2110.14941v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.00711">arXiv:2109.00711</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.00711">pdf</a>, <a href="https://arxiv.org/format/2109.00711">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Disordered Systems and Neural Networks">cond-mat.dis-nn</span> </div> </div> <p class="title is-5 mathjax"> Heterogeneous relational message passing networks for molecular dynamics simulations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+S">Sibo Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yong Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Hao%2C+S">Shaogang Hao</a>, <a href="/search/cs?searchtype=author&amp;query=Hsieh%2C+C+Y">Chang Yu Hsieh</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+B">Bing-Lin Gu</a>, <a 
href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenhui Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.00711v1-abstract-short" style="display: inline;"> With many frameworks based on message passing neural networks proposed to predict molecular and bulk properties, machine learning methods have tremendously shifted the paradigms of computational sciences underpinning physics, material science, chemistry, and biology. While existing machine learning models have yielded superior performances in many occasions, most of them model and process molecula&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.00711v1-abstract-full').style.display = 'inline'; document.getElementById('2109.00711v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.00711v1-abstract-full" style="display: none;"> With many frameworks based on message passing neural networks proposed to predict molecular and bulk properties, machine learning methods have tremendously shifted the paradigms of computational sciences underpinning physics, material science, chemistry, and biology. While existing machine learning models have yielded superior performances in many occasions, most of them model and process molecular systems in terms of homogeneous graph, which severely limits the expressive power for representing diverse interactions. In practice, graph data with multiple node and edge types is ubiquitous and more appropriate for molecular systems. Thus, we propose the heterogeneous relational message passing network (HermNet), an end-to-end heterogeneous graph neural networks, to efficiently express multiple interactions in a single model with {\it ab initio} accuracy. 
HermNet performs impressively against many top-performing models on both molecular and extended systems. Specifically, HermNet outperforms other tested models in nearly 75\%, 83\% and 94\% of tasks on MD17, QM9 and extended systems datasets, respectively. Finally, we elucidate how the design of HermNet is compatible with quantum mechanics from the perspective of the density functional theory. Besides, HermNet is a universal framework, whose sub-networks could be replaced by other advanced models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.00711v1-abstract-full').style.display = 'none'; document.getElementById('2109.00711v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.02930">arXiv:2101.02930</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.02930">pdf</a>, <a href="https://arxiv.org/format/2101.02930">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Disordered Systems and Neural Networks">cond-mat.dis-nn</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Symmetry-adapted graph neural networks for constructing molecular dynamics force fields </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zun Wang</a>, <a 
href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+S">Sibo Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+S">Shiqiao Du</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yong Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+B">Bing-Lin Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenhui Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.02930v1-abstract-short" style="display: inline;"> Molecular dynamics is a powerful simulation tool to explore material properties. Most of the realistic material systems are too large to be simulated with first-principles molecular dynamics. Classical molecular dynamics has lower computational cost but requires accurate force fields to achieve chemical accuracy. In this work, we develop a symmetry-adapted graph neural networks framework, named mo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.02930v1-abstract-full').style.display = 'inline'; document.getElementById('2101.02930v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.02930v1-abstract-full" style="display: none;"> Molecular dynamics is a powerful simulation tool to explore material properties. Most of the realistic material systems are too large to be simulated with first-principles molecular dynamics. Classical molecular dynamics has lower computational cost but requires accurate force fields to achieve chemical accuracy. In this work, we develop a symmetry-adapted graph neural networks framework, named molecular dynamics graph neural networks (MDGNN), to construct force fields automatically for molecular dynamics simulations for both molecules and crystals. 
This architecture consistently preserves the translation, rotation and permutation invariance in the simulations. We propose a new feature engineering method including higher order contributions and show that MDGNN accurately reproduces the results of both classical and first-principles molecular dynamics. We also demonstrate that force fields constructed by the model has good transferability. Therefore, MDGNN provides an efficient and promising option for molecular dynamics simulations of large scale systems with high accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.02930v1-abstract-full').style.display = 'none'; document.getElementById('2101.02930v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.02840">arXiv:2011.02840</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2011.02840">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-72087-2_36">10.1007/978-3-030-72087-2_36 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p 
class="title is-5 mathjax"> DR-Unet104 for Multimodal MRI brain tumor segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Colman%2C+J">Jordan Colman</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wenting Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+X">Xujiong Ye</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.02840v2-abstract-short" style="display: inline;"> In this paper we propose a 2D deep residual Unet with 104 convolutional layers (DR-Unet104) for lesion segmentation in brain MRIs. We make multiple additions to the Unet architecture, including adding the &#39;bottleneck&#39; residual block to the Unet encoder and adding dropout after each convolution block stack. We verified the effect of introducing the regularisation of dropout with small rate (e.g. 0.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.02840v2-abstract-full').style.display = 'inline'; document.getElementById('2011.02840v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.02840v2-abstract-full" style="display: none;"> In this paper we propose a 2D deep residual Unet with 104 convolutional layers (DR-Unet104) for lesion segmentation in brain MRIs. We make multiple additions to the Unet architecture, including adding the &#39;bottleneck&#39; residual block to the Unet encoder and adding dropout after each convolution block stack. We verified the effect of introducing the regularisation of dropout with small rate (e.g. 0.2) on the architecture, and found a dropout of 0.2 improved the overall performance compared to no dropout, or a dropout of 0.5. 
We evaluated the proposed architecture as part of the Multimodal Brain Tumor Segmentation (BraTS) 2020 Challenge and compared our method to DeepLabV3+ with a ResNet-V2-152 backbone. We found that the DR-Unet104 achieved a mean dice score coefficient of 0.8862, 0.6756 and 0.6721 for validation data, whole tumor, enhancing tumor and tumor core respectively, an overall improvement on 0.8770, 0.65242 and 0.68134 achieved by DeepLabV3+. Our method produced a final mean DSC of 0.8673, 0.7514 and 0.7983 on whole tumor, enhancing tumor and tumor core on the challenge&#39;s testing data. We produced a competitive lesion segmentation architecture, despite only 2D convolutions, having the added benefit that it can be used on lower power computers than a 3D architecture. The source code and trained model for this work is openly available at https://github.com/jordan-colman/DR-Unet104. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.02840v2-abstract-full').style.display = 'none'; document.getElementById('2011.02840v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Part of the Multimodal Brain Tumor Segmentation 2020 Challenge conference proceedings</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> BrainLes 2020. 
Lecture Notes in Computer Science, vol 12659, pp 410-419 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.02131">arXiv:2009.02131</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.02131">pdf</a>, <a href="https://arxiv.org/ps/2009.02131">ps</a>, <a href="https://arxiv.org/format/2009.02131">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> 5G Technologies Based Remote E-Health: Architecture, Applications, and Solutions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+Y">Yancheng Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+G">Guoan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Frascolla%2C+V">Valerio Frascolla</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xin Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.02131v1-abstract-short" style="display: inline;"> Currently, many countries are facing the problems of aging population, serious imbalance of medical resources supply and demand, as well as uneven geographical distribution, resulting in a huge demand for remote e-health. Particularly, with invasions of COVID-19, the health of people and even social stability have been challenged unprecedentedly. 
To contribute to these urgent problems, this articl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.02131v1-abstract-full').style.display = 'inline'; document.getElementById('2009.02131v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.02131v1-abstract-full" style="display: none;"> Currently, many countries are facing the problems of aging population, serious imbalance of medical resources supply and demand, as well as uneven geographical distribution, resulting in a huge demand for remote e-health. Particularly, with invasions of COVID-19, the health of people and even social stability have been challenged unprecedentedly. To contribute to these urgent problems, this article proposes a general architecture of the remote e-health, where the city hospital provides the technical supports and services for remote hospitals. Meanwhile, 5G technologies supported telemedicine is introduced to satisfy the high-speed transmission of massive multimedia medical data, and further realize the sharing of medical resources. Moreover, to turn passivity into initiative to prevent COVID-19, a broad area epidemic prevention and control scheme is also investigated, especially for the remote areas. We discuss their principles and key features, and foresee the challenges, opportunities, and future research trends. Finally, a node value and content popularity based caching strategy is introduced to provide a preliminary solution of the massive data storage and low-latency transmission. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.02131v1-abstract-full').style.display = 'none'; document.getElementById('2009.02131v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1810.06837">arXiv:1810.06837</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1810.06837">pdf</a>, <a href="https://arxiv.org/ps/1810.06837">ps</a>, <a href="https://arxiv.org/format/1810.06837">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Capacity Enhanced Cooperative D2D Systems over Rayleigh Fading Channels with NOMA </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Ju%2C+J">Jinjuan Ju</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Q">Qiang Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+Y">Yancheng Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhiliang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Choi%2C+J">Jeaho Choi</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+G">Guoan Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short 
has-text-grey-dark mathjax" id="1810.06837v1-abstract-short" style="display: inline;"> This paper considers the cooperative device-to-device (D2D) systems with non-orthogonal multiple access (NOMA). We assume that the base station (BS) can communicate simultaneously with all users to satisfy the full information transmission. In order to characterize the impact of the weak channel and different decoding schemes, two kinds of decoding strategies are proposed: \emph{single signal deco&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.06837v1-abstract-full').style.display = 'inline'; document.getElementById('1810.06837v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1810.06837v1-abstract-full" style="display: none;"> This paper considers the cooperative device-to-device (D2D) systems with non-orthogonal multiple access (NOMA). We assume that the base station (BS) can communicate simultaneously with all users to satisfy the full information transmission. In order to characterize the impact of the weak channel and different decoding schemes, two kinds of decoding strategies are proposed: \emph{single signal decoding scheme} and \emph{MRC decoding scheme}, respectively. For the \emph{single signal decoding scheme}, the users immediately decode the received signals after receptions from the BS. Meanwhile, for the \emph{MRC decoding scheme}, instead of decoding, the users will keep the receptions in reserve until the corresponding phase comes and the users jointly decode the received signals by employing maximum ratio combining (MRC). Considering Rayleigh fading channels, the ergodic sum-rate (SR), outage probability and outage capacity of the proposed D2D-NOMA system are analyzed. Moreover, approximate expressions for the ergodic SR are also provided with a negligible performance loss. 
Numerical results demonstrate that the ergodic SR and outage probability of the proposed D2D-NOMA scheme overwhelm that of the conventional NOMA schemes. Furthermore, it is also revealed that the system performance including the ergodic SR and outage probability are limited by the poor channel condition for both the \emph{single signal decoding scheme} and conventional NOMA schemes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.06837v1-abstract-full').style.display = 'none'; document.getElementById('1810.06837v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1604.01151">arXiv:1604.01151</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1604.01151">pdf</a>, <a href="https://arxiv.org/ps/1604.01151">ps</a>, <a href="https://arxiv.org/format/1604.01151">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Use of Non-Orthogonal Multiple Access in Dual-hop relaying </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Wen%2C+M">Miaowen Wen</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+Y">Yier Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Xiong%2C+Z">Zixiang Xiong</a>, <a 
href="/search/cs?searchtype=author&amp;query=Lee%2C+M+H">Moon Ho Lee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1604.01151v2-abstract-short" style="display: inline;"> To improve the sum-rate (SR) of the dual-hop relay system, a novel two-stage power allocation scheme with non-orthogonal multiple access (NOMA) is proposed. In this scheme, after the reception of the superposition coded symbol with a power allocation from the source, the relay node forwards a new superposition coded symbol with another power allocation to the destination. By employing the maximum&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1604.01151v2-abstract-full').style.display = 'inline'; document.getElementById('1604.01151v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1604.01151v2-abstract-full" style="display: none;"> To improve the sum-rate (SR) of the dual-hop relay system, a novel two-stage power allocation scheme with non-orthogonal multiple access (NOMA) is proposed. In this scheme, after the reception of the superposition coded symbol with a power allocation from the source, the relay node forwards a new superposition coded symbol with another power allocation to the destination. By employing the maximum ratio combination (MRC), the destination jointly decodes the information symbols from the source and the relay. Assuming Rayleigh fading channels, closed-form solution of the ergodic SR at high signal-to-noise ratio (SNR) is derived and a practical power allocation is also designed for the proposed NOMA scheme. Through numerical results, it is shown that the performance of the proposed scheme is significantly improved compared with the existing work. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1604.01151v2-abstract-full').style.display = 'none'; document.getElementById('1604.01151v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 April, 2016; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 April, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2016. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1502.04860">arXiv:1502.04860</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1502.04860">pdf</a>, <a href="https://arxiv.org/ps/1502.04860">ps</a>, <a href="https://arxiv.org/format/1502.04860">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Low-Complexity QL-QR Decomposition Based Beamforming Design for Two-Way MIMO Relay Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+W">Wei Song</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+M+H">Moon Ho Lee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1502.04860v1-abstract-short" style="display: inline;"> In this paper, we investigate the optimization problem of joint source and relay beamforming matrices for a twoway amplify-and-forward (AF) multi-input multi-output (MIMO) relay system. 
The system consisting of two source nodes and two relay nodes is considered and the linear minimum meansquare- error (MMSE) is employed at both receivers. We assume individual relay power constraints and study an i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1502.04860v1-abstract-full').style.display = 'inline'; document.getElementById('1502.04860v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1502.04860v1-abstract-full" style="display: none;"> In this paper, we investigate the optimization problem of joint source and relay beamforming matrices for a twoway amplify-and-forward (AF) multi-input multi-output (MIMO) relay system. The system consisting of two source nodes and two relay nodes is considered and the linear minimum meansquare- error (MMSE) is employed at both receivers. We assume individual relay power constraints and study an important design problem, a so-called determinant maximization (DM) problem. Since this DM problem is nonconvex, we consider an efficient iterative algorithm by using an MSE balancing result to obtain at least a locally optimal solution. The proposed algorithm is developed based on QL, QR and Choleskey decompositions which differ in the complexity and performance. Analytical and simulation results show that the proposed algorithm can significantly reduce computational complexity compared with their existing two-way relay systems and have equivalent bit-error-rate (BER) performance to the singular value decomposition (SVD) based on a regular block diagonal (RBD) scheme. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1502.04860v1-abstract-full').style.display = 'none'; document.getElementById('1502.04860v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 February, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2015. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1303.2251">arXiv:1303.2251</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1303.2251">pdf</a>, <a href="https://arxiv.org/ps/1303.2251">ps</a>, <a href="https://arxiv.org/format/1303.2251">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Zero-point attracting projection algorithm for sequential compressive sensing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=You%2C+Y">Yang You</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+J">Jian Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wei Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+N">Ningning Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+Y">Yuantao Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Jian Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1303.2251v1-abstract-short" style="display: inline;"> Sequential Compressive Sensing, which may be widely used in sensing devices, is a popular topic of recent research. 
This paper proposes an online recovery algorithm for sparse approximation of sequential compressive sensing. Several techniques including warm start, fast iteration, and variable step size are adopted in the proposed algorithm to improve its online performance. Finally, numerical sim&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1303.2251v1-abstract-full').style.display = 'inline'; document.getElementById('1303.2251v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1303.2251v1-abstract-full" style="display: none;"> Sequential Compressive Sensing, which may be widely used in sensing devices, is a popular topic of recent research. This paper proposes an online recovery algorithm for sparse approximation of sequential compressive sensing. Several techniques including warm start, fast iteration, and variable step size are adopted in the proposed algorithm to improve its online performance. Finally, numerical simulations demonstrate its better performance than the related art. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1303.2251v1-abstract-full').style.display = 'none'; document.getElementById('1303.2251v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 March, 2013; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2013. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 2 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEICE Electronics Express, 9(4):314-319, 2012 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1208.0542">arXiv:1208.0542</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1208.0542">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> A Constructive Algorithm to Prove P=NP </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+W">Wen-Qi Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1208.0542v1-abstract-short" style="display: inline;"> After reducing the undirected Hamiltonian cycle problem into the TSP problem with cost 0 or 1, we developed an effective algorithm to compute the optimal tour of the transformed TSP. 
Our algorithm is described as a growth process: initially, constructing 4-vertexes optimal tour; next, one new vertex being added into the optimal tour in such a way to obtain the new optimal tour; then, repeating the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1208.0542v1-abstract-full').style.display = 'inline'; document.getElementById('1208.0542v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1208.0542v1-abstract-full" style="display: none;"> After reducing the undirected Hamiltonian cycle problem into the TSP problem with cost 0 or 1, we developed an effective algorithm to compute the optimal tour of the transformed TSP. Our algorithm is described as a growth process: initially, constructing 4-vertexes optimal tour; next, one new vertex being added into the optimal tour in such a way to obtain the new optimal tour; then, repeating the previous step until all vertexes are included into the optimal tour. This paper has shown that our constructive algorithm can solve the undirected Hamiltonian cycle problem in polynomial time. According to Cook-Levin theorem, we argue that we have provided a constructive proof of P=NP. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1208.0542v1-abstract-full').style.display = 'none'; document.getElementById('1208.0542v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2012; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2012. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a 
href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 
47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10