Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 221 results for author: <span class="mathjax">Yan, C</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Yan%2C+C">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Yan, C"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Yan%2C+C&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Yan, C"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Yan%2C+C&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Yan%2C+C&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Yan%2C+C&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Yan%2C+C&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Yan%2C+C&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Yan%2C+C&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.19625">arXiv:2503.19625</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.19625">pdf</a>, <a href="https://arxiv.org/format/2503.19625">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DynOPETs: A Versatile Benchmark for Dynamic Object Pose Estimation and Tracking in Moving Camera Scenarios </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Meng%2C+X">Xiangting Meng</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Jiaqi Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+M">Mingshu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenxin Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Y">Yujiao Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+W">Wenchao Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Kneip%2C+L">Laurent Kneip</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.19625v1-abstract-short" style="display: inline;"> In the realm of object pose estimation, scenarios involving both dynamic objects and moving cameras are prevalent. However, the scarcity of corresponding real-world datasets significantly hinders the development and evaluation of robust pose estimation models. 
Abstract: In the realm of object pose estimation, scenarios involving both dynamic objects and moving cameras are prevalent. However, the scarcity of corresponding real-world datasets significantly hinders the development and evaluation of robust pose estimation models. This is largely attributed to the inherent challenges in accurately annotating object poses in dynamic scenes captured by moving cameras. To bridge this gap, this paper presents a novel dataset DynOPETs and a dedicated data acquisition and annotation pipeline tailored for object pose estimation and tracking in such unconstrained environments. Our efficient annotation method innovatively integrates pose estimation and pose tracking techniques to generate pseudo-labels, which are subsequently refined through pose graph optimization. The resulting dataset offers accurate pose annotations for dynamic objects observed from moving cameras. To validate the effectiveness and value of our dataset, we perform comprehensive evaluations using 18 state-of-the-art methods, demonstrating its potential to accelerate research in this challenging domain. The dataset will be made publicly available to facilitate further exploration and advancement in the field.
Submitted 25 March, 2025; originally announced March 2025.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.18783">arXiv:2503.18783</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.18783">pdf</a>, <a href="https://arxiv.org/format/2503.18783">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Frequency Dynamic Convolution for Dense Image Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Linwei Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+L">Lin Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Liang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenggang Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+Y">Ying Fu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.18783v2-abstract-short" style="display: inline;"> While Dynamic Convolution (DY-Conv) has shown promising performance by enabling adaptive weight selection through multiple parallel weights combined with an attention mechanism, the frequency response of these weights tends to exhibit high similarity, resulting in high parameter costs but limited adaptability. In this work, we introduce Frequency Dynamic Convolution (FDConv), a novel approach that&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.18783v2-abstract-full').style.display = 'inline'; document.getElementById('2503.18783v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.18783v2-abstract-full" style="display: none;"> While Dynamic Convolution (DY-Conv) has shown promising performance by enabling adaptive weight selection through multiple parallel weights combined with an attention mechanism, the frequency response of these weights tends to exhibit high similarity, resulting in high parameter costs but limited adaptability. In this work, we introduce Frequency Dynamic Convolution (FDConv), a novel approach that mitigates these limitations by learning a fixed parameter budget in the Fourier domain. FDConv divides this budget into frequency-based groups with disjoint Fourier indices, enabling the construction of frequency-diverse weights without increasing the parameter cost. To further enhance adaptability, we propose Kernel Spatial Modulation (KSM) and Frequency Band Modulation (FBM). KSM dynamically adjusts the frequency response of each filter at the spatial level, while FBM decomposes weights into distinct frequency bands in the frequency domain and modulates them dynamically based on local content. Extensive experiments on object detection, segmentation, and classification validate the effectiveness of FDConv. We demonstrate that when applied to ResNet-50, FDConv achieves superior performance with a modest increase of +3.6M parameters, outperforming previous methods that require substantial increases in parameter budgets (e.g., CondConv +90M, KW +76.5M). 
Abstract: While Dynamic Convolution (DY-Conv) has shown promising performance by enabling adaptive weight selection through multiple parallel weights combined with an attention mechanism, the frequency response of these weights tends to exhibit high similarity, resulting in high parameter costs but limited adaptability. In this work, we introduce Frequency Dynamic Convolution (FDConv), a novel approach that mitigates these limitations by learning a fixed parameter budget in the Fourier domain. FDConv divides this budget into frequency-based groups with disjoint Fourier indices, enabling the construction of frequency-diverse weights without increasing the parameter cost. To further enhance adaptability, we propose Kernel Spatial Modulation (KSM) and Frequency Band Modulation (FBM). KSM dynamically adjusts the frequency response of each filter at the spatial level, while FBM decomposes weights into distinct frequency bands in the frequency domain and modulates them dynamically based on local content. Extensive experiments on object detection, segmentation, and classification validate the effectiveness of FDConv. We demonstrate that when applied to ResNet-50, FDConv achieves superior performance with a modest increase of +3.6M parameters, outperforming previous methods that require substantial increases in parameter budgets (e.g., CondConv +90M, KW +76.5M). Moreover, FDConv seamlessly integrates into a variety of architectures, including ConvNeXt and Swin-Transformer, offering a flexible and efficient solution for modern vision tasks. The code is made publicly available at https://github.com/Linwei-Chen/FDConv.
Submitted 24 March, 2025; v1 submitted 24 March, 2025; originally announced March 2025.
Comments: Accepted by CVPR 2025
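To make the mechanism described above concrete, here is a toy sketch of the core idea: a single fixed budget of Fourier coefficients is partitioned into disjoint frequency groups, and each group is inverse-transformed into its own spatial kernel, giving frequency-diverse kernels without extra parameters. This is an editorial illustration under assumed names and shapes (fourier_grouped_kernels, a 3x3 kernel), not the authors' FDConv implementation; the attention-based KSM and FBM modulation steps are omitted.

```python
import numpy as np

def fourier_grouped_kernels(budget_real, budget_imag, n_kernels, ksize=3):
    # One shared budget of complex Fourier coefficients for a ksize x ksize kernel.
    budget = budget_real + 1j * budget_imag                        # shape (ksize, ksize)
    groups = np.array_split(np.arange(ksize * ksize), n_kernels)   # disjoint Fourier indices
    kernels = []
    for g in groups:
        spec = np.zeros(ksize * ksize, dtype=complex)
        spec[g] = budget.ravel()[g]                                # this kernel "owns" only its group
        kernels.append(np.fft.ifft2(spec.reshape(ksize, ksize)).real)
    return np.stack(kernels)                                       # (n_kernels, ksize, ksize)

rng = np.random.default_rng(0)
w = fourier_grouped_kernels(rng.normal(size=(3, 3)), rng.normal(size=(3, 3)), n_kernels=4)
print(w.shape)  # (4, 3, 3): four frequency-diverse kernels drawn from one shared budget
```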
3. arXiv:2503.12329 [pdf, other] cs.CV cs.CL
CapArena: Benchmarking and Analyzing Detailed Image Captioning in the LLM Era
Authors: Kanzhi Cheng, Wenpo Song, Jiaxin Fan, Zheng Ma, Qiushi Sun, Fangzhi Xu, Chenyang Yan, Nuo Chen, Jianbing Zhang, Jiajun Chen
Abstract: Image captioning has been a longstanding challenge in vision-language research. With the rise of LLMs, modern Vision-Language Models (VLMs) generate detailed and comprehensive image descriptions. However, benchmarking the quality of such captions remains unresolved. This paper addresses two key questions: (1) How well do current VLMs actually perform on image captioning, particularly compared to humans? We built CapArena, a platform with over 6000 pairwise caption battles and high-quality human preference votes. Our arena-style evaluation marks a milestone, showing that leading models like GPT-4o achieve or even surpass human performance, while most open-source models lag behind. (2) Can automated metrics reliably assess detailed caption quality? Using human annotations from CapArena, we evaluate traditional and recent captioning metrics, as well as VLM-as-a-Judge. Our analysis reveals that while some metrics (e.g., METEOR) show decent caption-level agreement with humans, their systematic biases lead to inconsistencies in model ranking. In contrast, VLM-as-a-Judge demonstrates robust discernment at both the caption and model levels. Building on these insights, we release CapArena-Auto, an accurate and efficient automated benchmark for detailed captioning, achieving 94.3% correlation with human rankings at just $4 per test. Data and resources will be open-sourced at https://caparena.github.io.
Submitted 15 March, 2025; originally announced March 2025.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.12042">arXiv:2503.12042</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.12042">pdf</a>, <a href="https://arxiv.org/format/2503.12042">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Prosody-Enhanced Acoustic Pre-training and Acoustic-Disentangled Prosody Adapting for Movie Dubbing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhedong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Liang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenggang Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Chunshan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Hengel%2C+A+v+d">Anton van den Hengel</a>, <a href="/search/cs?searchtype=author&amp;query=Qi%2C+Y">Yuankai Qi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.12042v2-abstract-short" style="display: inline;"> Movie dubbing describes the process of transforming a script into speech that aligns temporally and emotionally with a given movie clip while exemplifying the speaker&#39;s voice demonstrated in a short reference audio clip. This task demands the model bridge character performances and complicated prosody structures to build a high-quality video-synchronized dubbing track. The limited scale of movie d&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.12042v2-abstract-full').style.display = 'inline'; document.getElementById('2503.12042v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.12042v2-abstract-full" style="display: none;"> Movie dubbing describes the process of transforming a script into speech that aligns temporally and emotionally with a given movie clip while exemplifying the speaker&#39;s voice demonstrated in a short reference audio clip. This task demands the model bridge character performances and complicated prosody structures to build a high-quality video-synchronized dubbing track. The limited scale of movie dubbing datasets, along with the background noise inherent in audio data, hinder the acoustic modeling performance of trained models. To address these issues, we propose an acoustic-prosody disentangled two-stage method to achieve high-quality dubbing generation with precise prosody alignment. First, we propose a prosody-enhanced acoustic pre-training to develop robust acoustic modeling capabilities. Then, we freeze the pre-trained acoustic system and design a disentangled framework to model prosodic text features and dubbing style while maintaining acoustic quality. Additionally, we incorporate an in-domain emotion analysis module to reduce the impact of visual domain shifts across different movies, thereby enhancing emotion-prosody alignment. 
Abstract: Movie dubbing describes the process of transforming a script into speech that aligns temporally and emotionally with a given movie clip while exemplifying the speaker's voice demonstrated in a short reference audio clip. This task demands the model bridge character performances and complicated prosody structures to build a high-quality video-synchronized dubbing track. The limited scale of movie dubbing datasets, along with the background noise inherent in audio data, hinders the acoustic modeling performance of trained models. To address these issues, we propose an acoustic-prosody disentangled two-stage method to achieve high-quality dubbing generation with precise prosody alignment. First, we propose a prosody-enhanced acoustic pre-training to develop robust acoustic modeling capabilities. Then, we freeze the pre-trained acoustic system and design a disentangled framework to model prosodic text features and dubbing style while maintaining acoustic quality. Additionally, we incorporate an in-domain emotion analysis module to reduce the impact of visual domain shifts across different movies, thereby enhancing emotion-prosody alignment. Extensive experiments show that our method performs favorably against the state-of-the-art models on two primary benchmarks. The demos are available at https://zzdoog.github.io/ProDubber/.
Submitted 18 March, 2025; v1 submitted 15 March, 2025; originally announced March 2025.
Comments: Accepted by CVPR 2025

5. arXiv:2503.12006 [pdf, other] cs.CV
ROS-SAM: High-Quality Interactive Segmentation for Remote Sensing Moving Object
Authors: Zhe Shan, Yang Liu, Lei Zhou, Cheng Yan, Heng Wang, Xia Xie
Abstract: The availability of large-scale remote sensing video data underscores the importance of high-quality interactive segmentation. However, challenges such as small object sizes, ambiguous features, and limited generalization make it difficult for current methods to achieve this goal. In this work, we propose ROS-SAM, a method designed to achieve high-quality interactive segmentation while preserving generalization across diverse remote sensing data. The ROS-SAM is built upon three key innovations: 1) LoRA-based fine-tuning, which enables efficient domain adaptation while maintaining SAM's generalization ability, 2) Enhancement of deep network layers to improve the discriminability of extracted features, thereby reducing misclassifications, and 3) Integration of global context with local boundary details in the mask decoder to generate high-quality segmentation masks. Additionally, we design the data pipeline to ensure the model learns to better handle objects at varying scales during training while focusing on high-quality predictions during inference. Experiments on remote sensing video datasets show that the redesigned data pipeline boosts the IoU by 6%, while ROS-SAM increases the IoU by 13%. Finally, when evaluated on existing remote sensing object tracking datasets, ROS-SAM demonstrates impressive zero-shot capabilities, generating masks that closely resemble manual annotations. These results confirm ROS-SAM as a powerful tool for fine-grained segmentation in remote sensing applications. Code is available at https://github.com/ShanZard/ROS-SAM.
Submitted 15 March, 2025; originally announced March 2025.
Comments: Accepted to CVPR 2025
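The "LoRA-based fine-tuning" in innovation 1) refers to the standard low-rank adaptation recipe: freeze a pretrained layer and learn a small low-rank additive update. A minimal, self-contained PyTorch sketch of that recipe follows; it is a generic illustration (the class name and hyperparameters are assumptions), not the ROS-SAM code.

```python
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """Wrap a frozen pretrained linear layer with a trainable low-rank update:
    y = W x + (alpha / r) * B A x, with A in R^{r x in} and B in R^{out x r}."""
    def __init__(self, base: nn.Linear, r: int = 8, alpha: float = 16.0):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad = False                                   # keep the pretrained weights frozen
        self.lora_a = nn.Parameter(torch.randn(r, base.in_features) * 0.01)
        self.lora_b = nn.Parameter(torch.zeros(base.out_features, r))  # zero init: update starts at zero
        self.scale = alpha / r

    def forward(self, x):
        return self.base(x) + self.scale * (x @ self.lora_a.T) @ self.lora_b.T

layer = LoRALinear(nn.Linear(256, 256))
print(sum(p.numel() for p in layer.parameters() if p.requires_grad))  # 4096 trainable vs 65792 frozen
```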
6. arXiv:2503.11004 [pdf, other] cs.CV
VA-AR: Learning Velocity-Aware Action Representations with Mixture of Window Attention
Authors: Jiangning Wei, Lixiong Qin, Bo Yu, Tianjian Zou, Chuhan Yan, Dandan Xiao, Yang Yu, Lan Yang, Ke Li, Jun Liu
Abstract: Action recognition is a crucial task in artificial intelligence, with significant implications across various domains. We initially perform a comprehensive analysis of seven prominent action recognition methods across five widely-used datasets. This analysis reveals a critical, yet previously overlooked, observation: as the velocity of actions increases, the performance of these methods variably declines, undermining their robustness. This decline in performance poses significant challenges for their application in real-world scenarios. Building on these findings, we introduce the Velocity-Aware Action Recognition (VA-AR) framework to obtain robust action representations across different velocities. Our principal insight is that rapid actions (e.g., the giant circle backward in uneven bars or a smash in badminton) occur within short time intervals, necessitating smaller temporal attention windows to accurately capture intricate changes. Conversely, slower actions (e.g., drinking water or wiping face) require larger windows to effectively encompass the broader context. VA-AR employs a Mixture of Window Attention (MoWA) strategy, dynamically adjusting its attention window size based on the action's velocity. This adjustment enables VA-AR to obtain a velocity-aware representation, thereby enhancing the accuracy of action recognition. Extensive experiments confirm that VA-AR achieves state-of-the-art performance on the same five datasets, demonstrating VA-AR's effectiveness across a broad spectrum of action recognition scenarios.
Submitted 13 March, 2025; originally announced March 2025.
Comments: Accepted by AAAI 2025
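The abstract's key mechanism, a smaller temporal attention window for fast actions and a larger one for slow actions, can be sketched as a simple heuristic that maps an estimated motion magnitude to a window size. This is a hypothetical illustration (the function, the velocity estimate, and the mapping are editorial assumptions), not the MoWA module itself.

```python
import numpy as np

def velocity_to_window(frame_feats, base_window=16, min_window=4):
    # Estimate motion magnitude as the average frame-to-frame feature change.
    velocity = np.linalg.norm(np.diff(frame_feats, axis=0), axis=1).mean()
    # Faster motion -> smaller temporal attention window, clipped to a sane range.
    return int(np.clip(base_window / (1.0 + velocity), min_window, base_window))

rng = np.random.default_rng(0)
slow = np.cumsum(rng.normal(scale=0.01, size=(64, 128)), axis=0)   # slowly drifting features
fast = np.cumsum(rng.normal(scale=0.5, size=(64, 128)), axis=0)    # rapidly changing features
print(velocity_to_window(slow), velocity_to_window(fast))  # larger window for slow, smaller for fast
```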
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by AAAI 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.08489">arXiv:2503.08489</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.08489">pdf</a>, <a href="https://arxiv.org/format/2503.08489">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> A Triple-Inertial Accelerated Alternating Optimization Method for Deep Learning Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chengcheng Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+J">Jiawei Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Q">Qingsong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+Z">Zheng Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.08489v2-abstract-short" style="display: inline;"> The stochastic gradient descent (SGD) algorithm has achieved remarkable success in training deep learning models. However, it has several limitations, including susceptibility to vanishing gradients, sensitivity to input data, and a lack of robust theoretical guarantees. In recent years, alternating minimization (AM) methods have emerged as a promising alternative for model training by employing g&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.08489v2-abstract-full').style.display = 'inline'; document.getElementById('2503.08489v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.08489v2-abstract-full" style="display: none;"> The stochastic gradient descent (SGD) algorithm has achieved remarkable success in training deep learning models. However, it has several limitations, including susceptibility to vanishing gradients, sensitivity to input data, and a lack of robust theoretical guarantees. In recent years, alternating minimization (AM) methods have emerged as a promising alternative for model training by employing gradient-free approaches to iteratively update model parameters. Despite their potential, these methods often exhibit slow convergence rates. To address this challenge, we propose a novel Triple-Inertial Accelerated Alternating Minimization (TIAM) framework for neural network training. The TIAM approach incorporates a triple-inertial acceleration strategy with a specialized approximation method, facilitating targeted acceleration of different terms in each sub-problem optimization. This integration improves the efficiency of convergence, achieving superior performance with fewer iterations. Additionally, we provide a convergence analysis of the TIAM algorithm, including its global convergence properties and convergence rate. 
Abstract: The stochastic gradient descent (SGD) algorithm has achieved remarkable success in training deep learning models. However, it has several limitations, including susceptibility to vanishing gradients, sensitivity to input data, and a lack of robust theoretical guarantees. In recent years, alternating minimization (AM) methods have emerged as a promising alternative for model training by employing gradient-free approaches to iteratively update model parameters. Despite their potential, these methods often exhibit slow convergence rates. To address this challenge, we propose a novel Triple-Inertial Accelerated Alternating Minimization (TIAM) framework for neural network training. The TIAM approach incorporates a triple-inertial acceleration strategy with a specialized approximation method, facilitating targeted acceleration of different terms in each sub-problem optimization. This integration improves the efficiency of convergence, achieving superior performance with fewer iterations. Additionally, we provide a convergence analysis of the TIAM algorithm, including its global convergence properties and convergence rate. Extensive experiments validate the effectiveness of the TIAM method, showing significant improvements in generalization capability and computational efficiency compared to existing approaches, particularly when applied to the rectified linear unit (ReLU) and its variants.
Submitted 13 March, 2025; v1 submitted 11 March, 2025; originally announced March 2025.
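As background for the inertial-acceleration idea, the sketch below applies a single heavy-ball-style extrapolation to each block of a toy two-block alternating minimization. It only illustrates why inertial terms can speed up AM-style updates; the actual TIAM scheme (three inertial terms plus a sub-problem approximation) is not reproduced here, and all names and constants are editorial choices.

```python
import numpy as np

def inertial_am(grad_x, grad_y, x0, y0, steps=200, lr=0.1, beta=0.5):
    """Alternating gradient steps on two blocks, each preceded by an
    inertial (momentum) extrapolation z = v + beta * (v - v_prev)."""
    x, y = x0.copy(), y0.copy()
    x_prev, y_prev = x.copy(), y.copy()
    for _ in range(steps):
        x_ext = x + beta * (x - x_prev)                  # extrapolate the x-block
        x_prev, x = x, x_ext - lr * grad_x(x_ext, y)     # descend the x sub-problem
        y_ext = y + beta * (y - y_prev)                  # extrapolate the y-block
        y_prev, y = y, y_ext - lr * grad_y(x, y_ext)     # descend the y sub-problem with the new x
    return x, y

# Toy objective f(x, y) = ||x - y||^2 + ||y - 1||^2, minimized at x = y = 1.
grad_x = lambda x, y: 2 * (x - y)
grad_y = lambda x, y: -2 * (x - y) + 2 * (y - 1.0)
x, y = inertial_am(grad_x, grad_y, np.zeros(3), np.zeros(3))
print(np.round(x, 3), np.round(y, 3))  # both blocks approach the vector of ones
```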
8. arXiv:2503.04980 [pdf] cs.CR cs.AI
A Consensus Privacy Metrics Framework for Synthetic Data
Authors: Lisa Pilgram, Fida K. Dankar, Jorg Drechsler, Mark Elliot, Josep Domingo-Ferrer, Paul Francis, Murat Kantarcioglu, Linglong Kong, Bradley Malin, Krishnamurty Muralidhar, Puja Myles, Fabian Prasser, Jean Louis Raisaro, Chao Yan, Khaled El Emam
Abstract: Synthetic data generation is one approach for sharing individual-level data. However, to meet legislative requirements, it is necessary to demonstrate that the individuals' privacy is adequately protected. There is no consolidated standard for measuring privacy in synthetic data. Through an expert panel and consensus process, we developed a framework for evaluating privacy in synthetic data. Our findings indicate that current similarity metrics fail to measure identity disclosure, and their use is discouraged. For differentially private synthetic data, a privacy budget other than close to zero was not considered interpretable. There was consensus on the importance of membership and attribute disclosure, both of which involve inferring personal information about an individual without necessarily revealing their identity. The resultant framework provides precise recommendations for metrics that address these types of disclosures effectively. Our findings further present specific opportunities for future research that can help with widespread adoption of synthetic data.
Submitted 6 March, 2025; originally announced March 2025.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.04446">arXiv:2503.04446</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.04446">pdf</a>, <a href="https://arxiv.org/format/2503.04446">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> SMTPD: A New Benchmark for Temporal Prediction of Social Media Popularity </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yijie Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+B">Bolun Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+W">Wei Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+H">Hangjia Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+Y">Yuchen Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+N">Ning Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+A">Anan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Quan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenggang Yan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.04446v1-abstract-short" style="display: inline;"> Social media popularity prediction task aims to predict the popularity of posts on social media platforms, which has a positive driving effect on application scenarios such as content optimization, digital marketing and online advertising. Though many studies have made significant progress, few of them pay much attention to the integration between popularity prediction with temporal alignment. In&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.04446v1-abstract-full').style.display = 'inline'; document.getElementById('2503.04446v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.04446v1-abstract-full" style="display: none;"> Social media popularity prediction task aims to predict the popularity of posts on social media platforms, which has a positive driving effect on application scenarios such as content optimization, digital marketing and online advertising. Though many studies have made significant progress, few of them pay much attention to the integration between popularity prediction with temporal alignment. In this paper, with exploring YouTube&#39;s multilingual and multi-modal content, we construct a new social media temporal popularity prediction benchmark, namely SMTPD, and suggest a baseline framework for temporal popularity prediction. Through data analysis and experiments, we verify that temporal alignment and early popularity play crucial roles in social media popularity prediction for not only deepening the understanding of temporal dynamics of popularity in social media but also offering a suggestion about developing more effective prediction models in this field. Code is available at https://github.com/zhuwei321/SMTPD. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.04446v1-abstract-full').style.display = 'none'; document.getElementById('2503.04446v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 March, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accept by CVPR 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.20560">arXiv:2502.20560</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.20560">pdf</a>, <a href="https://arxiv.org/format/2502.20560">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Towards Statistical Factuality Guarantee for Large Vision-Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhuohang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chao Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Jackson%2C+N+J">Nicholas J. Jackson</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+W">Wendi Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+B">Bo Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jiaxin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Malin%2C+B+A">Bradley A. Malin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.20560v1-abstract-short" style="display: inline;"> Advancements in Large Vision-Language Models (LVLMs) have demonstrated promising performance in a variety of vision-language tasks involving image-conditioned free-form text generation. However, growing concerns about hallucinations in LVLMs, where the generated text is inconsistent with the visual context, are becoming a major impediment to deploying these models in applications that demand guara&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.20560v1-abstract-full').style.display = 'inline'; document.getElementById('2502.20560v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.20560v1-abstract-full" style="display: none;"> Advancements in Large Vision-Language Models (LVLMs) have demonstrated promising performance in a variety of vision-language tasks involving image-conditioned free-form text generation. However, growing concerns about hallucinations in LVLMs, where the generated text is inconsistent with the visual context, are becoming a major impediment to deploying these models in applications that demand guaranteed reliability. 
Abstract: Advancements in Large Vision-Language Models (LVLMs) have demonstrated promising performance in a variety of vision-language tasks involving image-conditioned free-form text generation. However, growing concerns about hallucinations in LVLMs, where the generated text is inconsistent with the visual context, are becoming a major impediment to deploying these models in applications that demand guaranteed reliability. In this paper, we introduce a framework to address this challenge, ConfLVLM, which is grounded on conformal prediction to achieve finite-sample distribution-free statistical guarantees on the factuality of LVLM output. This framework treats an LVLM as a hypothesis generator, where each generated text detail (or claim) is considered an individual hypothesis. It then applies a statistical hypothesis testing procedure to verify each claim using efficient heuristic uncertainty measures to filter out unreliable claims before returning any responses to users. We conduct extensive experiments covering three representative application domains, including general scene understanding, medical radiology report generation, and document understanding. Remarkably, ConfLVLM reduces the error rate of claims generated by LLaVa-1.5 for scene descriptions from 87.8% to 10.0% by filtering out erroneous claims with a 95.3% true positive rate. Our results further demonstrate that ConfLVLM is highly flexible, and can be applied to any black-box LVLMs paired with any uncertainty measure for any image-conditioned free-form text generation task while providing a rigorous guarantee on controlling the risk of hallucination.
Submitted 27 February, 2025; originally announced February 2025.
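The filtering step described above, scoring each generated claim with a heuristic uncertainty measure and keeping only claims below a threshold calibrated on held-out data so the retained claims meet a target error rate, can be sketched as follows. This is a simplified editorial illustration of threshold calibration, not the paper's conformal hypothesis-testing procedure; the function name, scores, and data are made up.

```python
import numpy as np

def calibrate_claim_filter(cal_scores, cal_correct, alpha=0.1):
    """Pick the largest uncertainty cutoff t such that, among calibration
    claims with score <= t, the empirical error rate is at most alpha."""
    cal_scores = np.asarray(cal_scores, dtype=float)
    cal_correct = np.asarray(cal_correct, dtype=bool)
    best_t = None
    for t in np.unique(cal_scores):
        kept = cal_scores <= t
        if kept.any() and 1.0 - cal_correct[kept].mean() <= alpha:
            best_t = t
    return best_t  # None means no cutoff meets the target error rate

rng = np.random.default_rng(0)
scores = rng.uniform(size=500)                    # heuristic uncertainty per calibration claim
correct = rng.uniform(size=500) > 0.8 * scores    # noisier claims are wrong more often
t = calibrate_claim_filter(scores, correct, alpha=0.1)
new_claims = rng.uniform(size=5)
print(t, new_claims <= t)  # keep only new claims whose uncertainty is below the calibrated cutoff
```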
11. arXiv:2502.15629 [pdf, ps, other] cs.CR
Mildly Accurate Computationally Differentially Private Inner Product Protocols Imply Oblivious Transfer
Authors: Iftach Haitner, Noam Mazor, Jad Silbak, Eliad Tsfadia, Chao Yan
Abstract: In distributed differential privacy, multiple parties collaboratively analyze their combined data while protecting the privacy of each party's data from the eyes of the others. Interestingly, for certain fundamental two-party functions like inner product and Hamming distance, the accuracy of distributed solutions significantly lags behind what can be achieved in the centralized model. However, under computational differential privacy, these limitations can be circumvented using oblivious transfer via secure multi-party computation. Yet, no results show that oblivious transfer is indeed necessary for accurately estimating a non-Boolean functionality. In particular, for the inner-product functionality, it was previously unknown whether oblivious transfer is necessary even for the best possible constant additive error. In this work, we prove that any computationally differentially private protocol that estimates the inner product over $\{-1,1\}^n \times \{-1,1\}^n$ up to an additive error of $O(n^{1/6})$ can be used to construct oblivious transfer. In particular, our result implies that protocols with sub-polynomial accuracy are equivalent to oblivious transfer. In this accuracy regime, our result improves upon Haitner, Mazor, Silbak, and Tsfadia [STOC '22], who showed that a key-agreement protocol is necessary.
Submitted 21 February, 2025; originally announced February 2025.
arXiv:2502.12669 (https://arxiv.org/abs/2502.12669) [cs.AI]
Perovskite-LLM: Knowledge-Enhanced Large Language Models for Perovskite Solar Cell Research
Authors: Xiang Liu, Penglei Sun, Shuyan Chen, Longhan Zhang, Peijie Dong, Huajie You, Yongqi Zhang, Chang Yan, Xiaowen Chu, Tong-yi Zhang
Abstract: The rapid advancement of perovskite solar cells (PSCs) has led to an exponential growth in research publications, creating an urgent need for efficient knowledge management and reasoning systems in this domain. We present a comprehensive knowledge-enhanced system for PSCs that integrates three key components. First, we develop Perovskite-KG, a domain-specific knowledge graph constructed from 1,517 research papers, containing 23,789 entities and 22,272 relationships. Second, we create two complementary datasets: Perovskite-Chat, comprising 55,101 high-quality question-answer pairs generated through a novel multi-agent framework, and Perovskite-Reasoning, containing 2,217 carefully curated materials science problems. Third, we introduce two specialized large language models: Perovskite-Chat-LLM for domain-specific knowledge assistance and Perovskite-Reasoning-LLM for scientific reasoning tasks.
Experimental results demonstrate that our system significantly outperforms existing models in both domain-specific knowledge retrieval and scientific reasoning tasks, providing researchers with effective tools for literature review, experimental design, and complex problem-solving in PSC research.
Submitted 18 February, 2025; originally announced February 2025.
Comments: 23 pages.

arXiv:2502.12117 (https://arxiv.org/abs/2502.12117) [cs.GT]
The Role of Prescreening in Auctions with Predictions
Authors: Yanwei Sun, Fupeng Sun, Chiwei Yan, Jiahua Wu
Abstract: We consider an auction environment with i.i.d. privately known valuations. Equipped with a noisy predictor, the auction designer receives a coarse signal about each player's valuation, where the signal is fully informative with a given probability. Based on the posterior expectation of the valuations, the designer selects the top players to admit -- a procedure we call prescreening. We show that this prescreening game is equivalent to a standard auction without prescreening but with correlated types. Moreover, when the signals are always fully informative, these correlated types are affiliated.
We characterize conditions for the existence of a symmetric and strictly monotone equilibrium strategy in both all-pay and first-price auctions. Our results reveal that prescreening can significantly improve the designer's revenue in all-pay auctions; in fact, when the prediction accuracy is one, admitting only two players is optimal. In contrast, prescreening is usually unnecessary in first-price auctions.
Submitted 17 February, 2025; originally announced February 2025.
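The prescreening setup itself is easy to simulate. The sketch below assumes a Uniform[0,1] prior and pure-noise signals drawn from the same prior, in which case the posterior expectation has the simple closed form q*s + (1-q)*0.5; these modeling choices and the function names are illustrative assumptions, and the sketch does not touch the paper's equilibrium or revenue analysis.

```python
import numpy as np

rng = np.random.default_rng(0)

def prescreen(valuations, q, k):
    """Rank players by the posterior expectation of their valuation given a
    noisy signal (informative with probability q) and admit the top k."""
    n = len(valuations)
    informative = rng.random(n) < q
    signals = np.where(informative, valuations, rng.random(n))  # noise ~ same Uniform[0,1] prior
    posterior = q * signals + (1 - q) * 0.5                     # E[v | s] under these assumptions
    return np.argsort(posterior)[::-1][:k]                      # indices of admitted players

vals = rng.random(10)
admitted = prescreen(vals, q=0.8, k=2)   # e.g., admit only two players
```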
href="/search/cs?searchtype=author&amp;query=Li%2C+B">Brian Li</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+C">Chengting Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Wan%2C+C">Changyi Wan</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+H">Hanpeng Hu</a> , et al. (120 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.11946v2-abstract-short" style="display: inline;"> Real-time speech interaction, serving as a fundamental interface for human-machine collaboration, holds immense potential. However, current open-source models face limitations such as high costs in voice data collection, weakness in dynamic control, and limited intelligence. To address these challenges, this paper introduces Step-Audio, the first production-ready open-source solution. Key contribu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11946v2-abstract-full').style.display = 'inline'; document.getElementById('2502.11946v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.11946v2-abstract-full" style="display: none;"> Real-time speech interaction, serving as a fundamental interface for human-machine collaboration, holds immense potential. However, current open-source models face limitations such as high costs in voice data collection, weakness in dynamic control, and limited intelligence. To address these challenges, this paper introduces Step-Audio, the first production-ready open-source solution. Key contributions include: 1) a 130B-parameter unified speech-text multi-modal model that achieves unified understanding and generation, with the Step-Audio-Chat version open-sourced; 2) a generative speech data engine that establishes an affordable voice cloning framework and produces the open-sourced lightweight Step-Audio-TTS-3B model through distillation; 3) an instruction-driven fine control system enabling dynamic adjustments across dialects, emotions, singing, and RAP; 4) an enhanced cognitive architecture augmented with tool calling and role-playing abilities to manage complex tasks effectively. Based on our new StepEval-Audio-360 evaluation benchmark, Step-Audio achieves state-of-the-art performance in human evaluations, especially in terms of instruction following. On open-source benchmarks like LLaMA Question, shows 9.3% average performance improvement, demonstrating our commitment to advancing the development of open-source multi-modal language technologies. Our code and models are available at https://github.com/stepfun-ai/Step-Audio. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11946v2-abstract-full').style.display = 'none'; document.getElementById('2502.11946v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
arXiv:2502.09608 (https://arxiv.org/abs/2502.09608) [cs.CV, cs.GR]
Instance Segmentation of Scene Sketches Using Natural Image Priors
Authors: Mia Tang, Yael Vinker, Chuan Yan, Lvmin Zhang, Maneesh Agrawala
Abstract: Sketch segmentation involves grouping pixels within a sketch that belong to the same object or instance. It serves as a valuable tool for sketch editing tasks, such as moving, scaling, or removing specific components. While image segmentation models have demonstrated remarkable capabilities in recent years, sketches present unique challenges for these models due to their sparse nature and wide variation in styles. We introduce SketchSeg, a method for instance segmentation of raster scene sketches. Our approach adapts state-of-the-art image segmentation and object detection models to the sketch domain by employing class-agnostic fine-tuning and refining segmentation masks using depth cues. Furthermore, our method organizes sketches into sorted layers, where occluded instances are inpainted, enabling advanced sketch editing applications. As existing datasets in this domain lack variation in sketch styles, we construct a synthetic scene sketch segmentation dataset featuring sketches with diverse brush strokes and varying levels of detail. We use this dataset to demonstrate the robustness of our approach and will release it to promote further research in the field.
Project webpage: https://sketchseg.github.io/sketch-seg/
Submitted 13 February, 2025; originally announced February 2025.
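The layer-ordering idea (depth-guided sorting of instance masks) can be sketched in a few lines. This is an illustration only: the median-depth heuristic, array shapes, and function name are assumptions, not SketchSeg's actual refinement or inpainting pipeline.

```python
import numpy as np

def sort_instances_into_layers(masks, depth_map):
    """Order instance masks from nearest to farthest using the median
    estimated depth inside each mask (smaller depth = closer)."""
    med = [np.median(depth_map[m]) if m.any() else np.inf for m in masks]
    return [masks[i] for i in np.argsort(med)]

# toy example: two instances on a 4x4 canvas with synthetic depth
depth = np.arange(16, dtype=float).reshape(4, 4)
far = np.zeros((4, 4), dtype=bool); far[3] = True    # lies where depth is large
near = np.zeros((4, 4), dtype=bool); near[0] = True  # lies where depth is small
layers = sort_instances_into_layers([far, near], depth)   # near first, then far
```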
arXiv:2502.06818 (https://arxiv.org/abs/2502.06818) [cs.LG]
Globality Strikes Back: Rethinking the Global Knowledge of CLIP in Training-Free Open-Vocabulary Semantic Segmentation
Authors: Jingyun Wang, Cilin Yan, Guoliang Kang
Abstract: Recent works modify CLIP to perform open-vocabulary semantic segmentation in a training-free manner (TF-OVSS). In CLIP, patch-wise image representations mainly encode homogeneous image-level properties and are therefore not discriminative enough, hindering its application to this dense prediction task. Previous works make image features more distinct across patches by making each patch attend mainly to itself or to neighboring patches within a narrow local window. However, with these modifications, CLIP's ability to aggregate global context information, which is known to be useful for distinguishing confusing categories, is largely weakened. In this paper, we propose a new method named GCLIP, which mines the beneficial global knowledge of CLIP to facilitate the TF-OVSS task. First, we aim to equip the last-block attention with image-level properties without introducing homogeneous attention patterns across patches. In GCLIP, we merge the attention from the blocks where global tokens emerge with the Query-Query attention to realize this goal.
Second, we aim to make the Value embeddings of the last-block attention module more distinct and semantically correlated. To realize this, we design a novel channel suppression strategy. As the representation of each patch is ultimately determined by the attention weights and the Value embeddings, our method generates more discriminative patch-level image features while absorbing global context information. Extensive experiments on five standard benchmarks demonstrate that our method consistently outperforms previous state-of-the-art methods.
Submitted 4 February, 2025; originally announced February 2025.
Comments: Under review.
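The attention-fusion step can be pictured with a small PyTorch-style sketch: an attention map borrowed from an earlier block (where global tokens emerge) is blended with Query-Query attention in the last block before being applied to the Value embeddings. The blending weight lam, the tensor shapes, and the function name are assumptions for illustration, not GCLIP's exact formulation, and the channel suppression step is omitted.

```python
import torch
import torch.nn.functional as F

def fused_last_block_attention(q_last, global_attn, v_last, lam=0.5):
    """Blend Query-Query attention with a global-context attention map.

    q_last:      (N, d) patch queries of the last block
    global_attn: (N, N) attention weights borrowed from an earlier block
    v_last:      (N, d) Value embeddings of the last block
    """
    d = q_last.shape[-1]
    qq_attn = F.softmax(q_last @ q_last.T / d ** 0.5, dim=-1)   # Query-Query attention
    attn = lam * qq_attn + (1 - lam) * global_attn              # merged attention map
    return attn @ v_last                                        # patch-level features

# toy shapes: 196 patches, 512-dim embeddings
feats = fused_last_block_attention(torch.randn(196, 512),
                                   torch.softmax(torch.randn(196, 196), dim=-1),
                                   torch.randn(196, 512))
```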
arXiv:2501.13859 (https://arxiv.org/abs/2501.13859) [cs.CV]
Learning Visual Proxy for Compositional Zero-Shot Learning
Authors: Shiyu Zhang, Cheng Yan, Yang Liu, Chenchen Jing, Lei Zhou, Wenjun Wang
Abstract: Compositional Zero-Shot Learning (CZSL) aims to recognize novel attribute-object compositions by leveraging knowledge from seen compositions. Existing methods align textual prototypes with visual features through Vision-Language Models (VLMs), but they face two key limitations: (1) modality gaps hinder the discrimination of semantically similar composition pairs, and (2) single-modal textual prototypes lack fine-grained visual cues, creating bottlenecks in VLM-based CZSL. In this paper, we introduce Visual Proxy Learning, a novel approach that facilitates the learning of distinct visual distributions, effectively reducing the modality gap and improving compositional generalization performance. Specifically, we initialize visual proxies for various attributes, objects, and their compositions using text representations. By optimizing the visual space, we capture fine-grained visual cues and guide the learning of more discriminative visual representations for attributes, objects, and compositions. Furthermore, we propose an effective Cross-Modal Joint Learning (CMJL) strategy that imposes cross-modal constraints between the original text-image space and the fine-grained visual space. This approach not only boosts generalization for previously unseen composition pairs but also sharpens the discrimination of similar pairs, fostering more robust and precise learning. Extensive experiments demonstrate state-of-the-art performance in closed-world scenarios and competitive open-world results across four established CZSL benchmarks, validating the effectiveness of our approach in advancing compositional generalization.
Submitted 13 March, 2025; v1 submitted 23 January, 2025; originally announced January 2025.
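How visual proxies might be set up is sketched below: one learnable proxy per composition is initialized from a text embedding and trained so that image features score highest against the correct proxy. The dimensions, temperature, and loss are illustrative assumptions; the paper's Cross-Modal Joint Learning constraints are not reproduced here.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class VisualProxies(nn.Module):
    """Learnable visual proxies, one per composition, initialized from text
    embeddings of the composition names (e.g., CLIP text features).
    Classification scores are cosine similarities to the proxies."""
    def __init__(self, text_init):             # text_init: (C, d)
        super().__init__()
        self.proxies = nn.Parameter(text_init.clone())

    def forward(self, img_feats):               # img_feats: (B, d)
        img = F.normalize(img_feats, dim=-1)
        prox = F.normalize(self.proxies, dim=-1)
        return img @ prox.T                     # (B, C) cosine logits

# toy training step
text_init = torch.randn(100, 512)               # 100 compositions, 512-dim features
model = VisualProxies(text_init)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
img_feats, labels = torch.randn(32, 512), torch.randint(0, 100, (32,))
loss = F.cross_entropy(model(img_feats) / 0.07, labels)   # temperature 0.07
loss.backward(); opt.step()
```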
arXiv:2501.08695 (https://arxiv.org/abs/2501.08695) [cs.IR]
Real-time Indexing for Large-scale Recommendation by Streaming Vector Quantization Retriever
Authors: Xingyan Bin, Jianfei Cui, Wujie Yan, Zhichen Zhao, Xintian Han, Chongyang Yan, Feng Zhang, Xun Zhou, Qi Wu, Zuotao Liu
Abstract: Retrievers, which form one of the most important recommendation stages, are responsible for efficiently selecting possible positive samples for the later stages under strict latency limitations. Because of this, large-scale systems always rely on approximate calculations and indexes to roughly shrink the candidate scale, together with a simple ranking model. Since simple models lack the ability to produce precise predictions, most existing methods focus on incorporating complicated ranking models. However, another fundamental problem, index effectiveness, remains unresolved, and it also bottlenecks the use of more complicated models. In this paper, we propose a novel index structure, the streaming Vector Quantization model, as a new generation of retrieval paradigm. Streaming VQ attaches items to indexes in real time, granting it immediacy. Moreover, through meticulous verification of possible variants, it achieves additional benefits such as index balancing and repairability, enabling it to support complicated ranking models as existing approaches do.
As a lightweight and implementation-friendly architecture, streaming VQ has been deployed and has replaced all major retrievers in Douyin and Douyin Lite, yielding remarkable gains in user engagement.
Submitted 15 January, 2025; originally announced January 2025.
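The core indexing loop (attach each arriving item to its nearest codeword and let the codeword drift toward recent items) can be sketched as follows. The EMA update, codebook size, and retrieval rule are illustrative assumptions and do not reflect the balancing and repair mechanisms described in the paper.

```python
import numpy as np

class StreamingVQIndex:
    """Toy streaming VQ index: items are attached to their nearest codeword as
    they arrive, and the codeword drifts toward recent items (EMA update)."""
    def __init__(self, num_codes=1024, dim=64, decay=0.99, seed=0):
        rng = np.random.default_rng(seed)
        self.codebook = rng.normal(size=(num_codes, dim))
        self.decay = decay
        self.buckets = {c: [] for c in range(num_codes)}    # code -> item ids

    def assign(self, item_id, emb):
        code = int(np.argmin(np.linalg.norm(self.codebook - emb, axis=1)))
        self.buckets[code].append(item_id)                   # real-time attachment
        self.codebook[code] = self.decay * self.codebook[code] + (1 - self.decay) * emb
        return code

    def retrieve(self, user_emb, top_codes=4):
        codes = np.argsort(np.linalg.norm(self.codebook - user_emb, axis=1))[:top_codes]
        return [i for c in codes for i in self.buckets[int(c)]]  # candidates for ranking

index = StreamingVQIndex()
index.assign("item_42", np.random.randn(64))
candidates = index.retrieve(np.random.randn(64))
```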
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.08352v1-abstract-full').style.display = 'none'; document.getElementById('2501.08352v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">3 pages, 2 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68-06 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> J.5 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> JCDL &#39;2024: Proceedings of the 24th ACM/IEEE Joint Conference on Digital Libraries </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.00053">arXiv:2501.00053</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.00053">pdf</a>, <a href="https://arxiv.org/format/2501.00053">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Implementing Trust in Non-Small Cell Lung Cancer Diagnosis with a Conformalized Uncertainty-Aware AI Framework in Whole-Slide Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xiaoge Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+T">Tao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chao Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Najdawi%2C+F">Fedaa Najdawi</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+K">Kai Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Y">Yuan Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Cheung%2C+Y">Yiu-ming Cheung</a>, <a href="/search/cs?searchtype=author&amp;query=Malin%2C+B+A">Bradley A. Malin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.00053v1-abstract-short" style="display: inline;"> Ensuring trustworthiness is fundamental to the development of artificial intelligence (AI) that is considered societally responsible, particularly in cancer diagnostics, where a misdiagnosis can have dire consequences. 
Current digital pathology AI models lack systematic solutions to address trustworthiness concerns arising from model limitations and data discrepancies between model deployment and development environments. To address this issue, we developed TRUECAM, a framework designed to ensure both data and model trustworthiness in non-small cell lung cancer subtyping with whole-slide images. TRUECAM integrates 1) a spectral-normalized neural Gaussian process for identifying out-of-scope inputs and 2) an ambiguity-guided elimination of tiles to filter out highly ambiguous regions, which together address data trustworthiness, as well as 3) conformal prediction to ensure controlled error rates. We systematically evaluated the framework across multiple large-scale cancer datasets, leveraging both task-specific and foundation models, and demonstrate that an AI model wrapped with TRUECAM significantly outperforms models that lack such guidance in terms of classification accuracy, robustness, interpretability, and data efficiency, while also achieving improvements in fairness. These findings highlight TRUECAM as a versatile wrapper framework for digital pathology AI models with diverse architectural designs, promoting their responsible and effective application in real-world settings.
Submitted 27 December, 2024; originally announced January 2025.
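One of the three components, ambiguity-guided tile elimination, is easy to picture: tiles whose predictions are too uncertain are dropped before slide-level aggregation. The entropy criterion, the fixed threshold, and the mean-pooling aggregation below are illustrative assumptions rather than TRUECAM's calibrated procedure, and the spectral-normalized Gaussian process and conformal prediction components are not shown.

```python
import numpy as np

def eliminate_ambiguous_tiles(tile_probs, max_entropy=0.6):
    """Drop tiles whose normalized predictive entropy exceeds a threshold.

    tile_probs: (num_tiles, num_classes) softmax outputs for one slide.
    Returns the retained probabilities and the boolean keep-mask."""
    p = np.clip(tile_probs, 1e-12, 1.0)
    entropy = -(p * np.log(p)).sum(axis=1) / np.log(p.shape[1])   # in [0, 1]
    keep = entropy <= max_entropy
    return tile_probs[keep], keep

# toy slide with 1000 tiles and 2 subtypes (e.g., LUAD vs. LUSC)
probs = np.random.dirichlet([1.0, 1.0], size=1000)
kept_probs, mask = eliminate_ambiguous_tiles(probs)
slide_pred = kept_probs.mean(axis=0).argmax()   # simple mean-pooling aggregation
```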
arXiv:2412.16864 (https://arxiv.org/abs/2412.16864) [cs.DB]
Efficient Row-Level Lineage Leveraging Predicate Pushdown
Authors: Yin Lin, Cong Yan
Abstract: Row-level lineage explains which input rows produce an output row through a data processing pipeline, with many applications such as data debugging, auditing, and data integration. Prior work on lineage falls into two lines: eager lineage tracking and lazy lineage inference. Eager tracking integrates lineage tracing tightly into the operator implementation, enabling efficient customized tracking. However, this approach is intrusive, system-specific, and lacks adaptability. In contrast, lazy inference generates additional queries to compute lineage; it can easily be applied to any database, but the lineage queries are usually slow. Furthermore, both approaches have limited coverage of the types of data processing pipelines supported, owing to operator-specific tracking or inference rules. In this work, we propose PredTrace, a lineage inference approach that achieves easy adaptation, low runtime overhead, efficient lineage querying, and high pipeline coverage. It achieves this by leveraging predicate pushdown: pushing a row-selection predicate that describes the target output down to the source tables and querying the lineage by running the pushed-down predicate. PredTrace may require saving intermediate results when running the pipeline in order to compute the precise lineage. When this is not viable, it can still infer lineage but may return a superset. Compared to prior work, PredTrace achieves higher coverage on TPC-H queries as well as on 70 sampled real-world data processing pipelines in which UDFs are widely used. It can infer lineage in seconds, outperforming prior lazy approaches by up to 10x.
Submitted 22 December, 2024; originally announced December 2024.
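Predicate pushdown for lineage can be illustrated end-to-end with a toy SQLite pipeline in Python: the output row of interest is described by a row-selection predicate, and that predicate is pushed down and run against each source table to recover the contributing rows. The table names, schema, and predicate are made up for illustration; real pipelines (and PredTrace's handling of UDFs and intermediate results) are far more involved.

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.executescript("""
CREATE TABLE customers(c_id INTEGER, name TEXT);
CREATE TABLE orders(o_id INTEGER, c_id INTEGER, amount REAL);
INSERT INTO customers VALUES (1, 'Ann'), (2, 'Bob');
INSERT INTO orders VALUES (10, 1, 99.0), (11, 1, 15.0), (12, 2, 40.0);
""")

# Pipeline output: per-customer spend above 50
pipeline = """
SELECT c.c_id, c.name, SUM(o.amount) AS spend
FROM customers c JOIN orders o ON c.c_id = o.c_id
GROUP BY c.c_id, c.name HAVING SUM(o.amount) > 50
"""
print(con.execute(pipeline).fetchall())        # [(1, 'Ann', 114.0)]

# Target output row: c_id = 1. Push the row-selection predicate down to each source table.
pushed = "c_id = ?"
lineage_customers = con.execute(f"SELECT * FROM customers WHERE {pushed}", (1,)).fetchall()
lineage_orders = con.execute(f"SELECT * FROM orders WHERE {pushed}", (1,)).fetchall()
print(lineage_customers, lineage_orders)       # the rows that produced the output row
```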
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.16864v1-abstract-full').style.display = 'none'; document.getElementById('2412.16864v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.16262">arXiv:2412.16262</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2412.16262">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> VirusT5: Harnessing Large Language Models to Predicting SARS-CoV-2 Evolution </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Marathe%2C+V">Vishwajeet Marathe</a>, <a href="/search/cs?searchtype=author&amp;query=Bajracharya%2C+D">Deewan Bajracharya</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Changhui Yan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.16262v1-abstract-short" style="display: inline;"> During a virus&#39;s evolution,various regions of the genome are subjected to distinct levels of functional constraints.Combined with factors like codon bias and DNA repair efficiency,these constraints contribute to unique mutation patterns within the genome or a specific gene. In this project, we harnessed the power of Large Language Models(LLMs) to predict the evolution of SARS-CoV-2. By treating th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.16262v1-abstract-full').style.display = 'inline'; document.getElementById('2412.16262v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2412.16262v1-abstract-full" style="display: none;"> During a virus&#39;s evolution,various regions of the genome are subjected to distinct levels of functional constraints.Combined with factors like codon bias and DNA repair efficiency,these constraints contribute to unique mutation patterns within the genome or a specific gene. In this project, we harnessed the power of Large Language Models(LLMs) to predict the evolution of SARS-CoV-2. By treating the mutation process from one generation to the next as a translation task, we trained a transformer model, called VirusT5, to capture the mutation patterns underlying SARS-CoV-2 evolution. We evaluated the VirusT5&#39;s ability to detect these mutation patterns including its ability to identify mutation hotspots and explored the potential of using VirusT5 to predict future virus variants. Our findings demonstrate the feasibility of using a large language model to model viral evolution as a translation process. 
This study establishes the groundbreaking concept of "mutation-as-translation," paving the way for new methodologies and tools for combating virus threats.
Submitted 20 December, 2024; originally announced December 2024.
Comments: This is a preprint of a paper submitted to IEEE for consideration.
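The "mutation-as-translation" framing amounts to building source/target sequence pairs from consecutive generations and training a seq2seq model on them. The sketch below only shows the data framing; the chunk size, prompt prefix, and alignment assumption (equal-length, pre-aligned sequences) are illustrative and this is not the authors' training pipeline.

```python
def make_translation_pairs(parent_seq: str, child_seq: str, chunk: int = 512):
    """Yield aligned (source, target) segments from a parent genome and its
    next-generation descendant, framed as a translation task."""
    assert len(parent_seq) == len(child_seq), "assumes pre-aligned, equal-length sequences"
    for i in range(0, len(parent_seq), chunk):
        src = parent_seq[i:i + chunk]
        tgt = child_seq[i:i + chunk]
        yield f"mutate: {' '.join(src)}", " ".join(tgt)   # space-separated bases as tokens

parent = "ATGGTTGTTTTT"
child  = "ATGGTAGTTTTT"          # one substitution at position 5
pairs = list(make_translation_pairs(parent, child, chunk=6))
# pairs[0] == ("mutate: A T G G T T", "A T G G T A")
```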
arXiv:2412.11535 (https://arxiv.org/abs/2412.11535) [cs.CV]
Relative Distance Guided Dynamic Partition Learning for Scale-Invariant UAV-View Geo-Localization
Authors: Quan Chen, Tingyu Wang, Rongfeng Lu, Bolun Zheng, Zhedong Zheng, Chenggang Yan
Abstract: UAV-view Geo-Localization (UVGL) presents substantial challenges, particularly due to the disparity in visual appearance between drone-captured imagery and satellite perspectives. Existing methods usually assume a consistent scaling factor across different views and therefore adopt predefined partition alignment, extracting viewpoint-invariant representations by constructing a variety of part-level features. However, this scaling assumption does not always hold in real-world scenarios, where variations in UAV flight state lead to scale mismatches across views and, in turn, serious performance degradation. To overcome this issue, we propose a partition learning framework based on relative distance, which alleviates the dependence on scale consistency while mining fine-grained features. Specifically, we propose a distance-guided dynamic partition learning strategy (DGDPL), consisting of a square partition strategy and a distance-guided adjustment strategy. The former is used to extract fine-grained features and global features in a simple manner. The latter calculates the relative distance ratio between drone- and satellite-view images to adjust the partition size, thereby explicitly aligning the semantic information between partition pairs. Furthermore, we propose a saliency-guided refinement strategy to refine part-level features, further improving retrieval accuracy. Extensive experiments show that our approach achieves superior geo-localization accuracy across various scale-inconsistent scenarios and exhibits remarkable robustness against scale variations. The code will be released.
Submitted 23 December, 2024; v1 submitted 16 December, 2024; originally announced December 2024.
Comments: In peer review.
href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenyu Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Mei%2C+G">Guanghao Mei</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jie Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lefei Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.10432v2-abstract-short" style="display: inline;"> Large Language Models (LLMs) have revolutionized text generation, making detecting machine-generated text increasingly challenging. Although past methods have achieved good performance on detecting pure machine-generated text, those detectors have poor performance on distinguishing machine-revised text (rewriting, expansion, and polishing), which can have only minor changes from its original human&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.10432v2-abstract-full').style.display = 'inline'; document.getElementById('2412.10432v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2412.10432v2-abstract-full" style="display: none;"> Large Language Models (LLMs) have revolutionized text generation, making detecting machine-generated text increasingly challenging. Although past methods have achieved good performance on detecting pure machine-generated text, those detectors have poor performance on distinguishing machine-revised text (rewriting, expansion, and polishing), which can have only minor changes from its original human prompt. As the content of text may originate from human prompts, detecting machine-revised text often involves identifying distinctive machine styles, e.g., worded favored by LLMs. However, existing methods struggle to detect machine-style phrasing hidden within the content contributed by humans. We propose the &#34;Imitate Before Detect&#34; (ImBD) approach, which first imitates the machine-style token distribution, and then compares the distribution of the text to be tested with the machine-style distribution to determine whether the text has been machine-revised. To this end, we introduce style preference optimization (SPO), which aligns a scoring LLM model to the preference of text styles generated by machines. The aligned scoring model is then used to calculate the style-conditional probability curvature (Style-CPC), quantifying the log probability difference between the original and conditionally sampled texts for effective detection. We conduct extensive comparisons across various scenarios, encompassing text revisions by six LLMs, four distinct text domains, and three machine revision types. Compared to existing state-of-the-art methods, our method yields a 13% increase in AUC for detecting text revised by open-source LLMs, and improves performance by 5% and 19% for detecting GPT-3.5 and GPT-4o revised text, respectively. Notably, our method surpasses the commercially trained GPT-Zero with just $1,000$ samples and five minutes of SPO, demonstrating its efficiency and effectiveness. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.10432v2-abstract-full').style.display = 'none'; document.getElementById('2412.10432v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear at AAAI 2025. 14 pages, 6 figure</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.00131">arXiv:2412.00131</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2412.00131">pdf</a>, <a href="https://arxiv.org/format/2412.00131">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Open-Sora Plan: Open-Source Large Video Generation Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lin%2C+B">Bin Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Ge%2C+Y">Yunyang Ge</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+X">Xinhua Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zongjian Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+B">Bin Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shaodong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xianyi He</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+Y">Yang Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+S">Shenghai Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Liuhan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+T">Tanghui Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Junwu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Z">Zhenyu Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+Y">Yatian Pang</a>, <a href="/search/cs?searchtype=author&amp;query=She%2C+B">Bin She</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Cen Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Z">Zhiheng Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+X">Xiaoyi Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Lin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+Z">Zhang Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+X">Xing Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+S">Shaoling Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+Y">Yonghong Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+L">Li Yuan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.00131v1-abstract-short" style="display: inline;"> We introduce Open-Sora Plan, an 
open-source project that aims to contribute a large generation model for generating desired high-resolution videos with long durations based on various user inputs. Our project comprises multiple components for the entire video generation process, including a Wavelet-Flow Variational Autoencoder, a Joint Image-Video Skiparse Denoiser, and various condition controlle&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.00131v1-abstract-full').style.display = 'inline'; document.getElementById('2412.00131v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2412.00131v1-abstract-full" style="display: none;"> We introduce Open-Sora Plan, an open-source project that aims to contribute a large generation model for generating desired high-resolution videos with long durations based on various user inputs. Our project comprises multiple components for the entire video generation process, including a Wavelet-Flow Variational Autoencoder, a Joint Image-Video Skiparse Denoiser, and various condition controllers. Moreover, many assistant strategies for efficient training and inference are designed, and a multi-dimensional data curation pipeline is proposed for obtaining desired high-quality data. Benefiting from efficient thoughts, our Open-Sora Plan achieves impressive video generation results in both qualitative and quantitative evaluations. We hope our careful design and practical experience can inspire the video generation research community. All our codes and model weights are publicly available at \url{https://github.com/PKU-YuanGroup/Open-Sora-Plan}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.00131v1-abstract-full').style.display = 'none'; document.getElementById('2412.00131v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">v1.3</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16064">arXiv:2411.16064</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16064">pdf</a>, <a href="https://arxiv.org/format/2411.16064">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Multi-Granularity Class Prototype Topology Distillation for Class-Incremental Source-Free Unsupervised Domain Adaptation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Deng%2C+P">Peihua Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jiehua Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Sheng%2C+X">Xichun Sheng</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenggang Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Y">Yaoqi Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+Y">Ying Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Liang Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16064v3-abstract-short" style="display: inline;"> This paper explores the Class-Incremental Source-Free Unsupervised Domain Adaptation (CI-SFUDA) problem, where the unlabeled target data come incrementally without access to labeled source instances. This problem poses two challenges, the interference of similar source-class knowledge in target-class representation learning and the shocks of new target knowledge to old ones. To address them, we pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16064v3-abstract-full').style.display = 'inline'; document.getElementById('2411.16064v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16064v3-abstract-full" style="display: none;"> This paper explores the Class-Incremental Source-Free Unsupervised Domain Adaptation (CI-SFUDA) problem, where the unlabeled target data come incrementally without access to labeled source instances. This problem poses two challenges, the interference of similar source-class knowledge in target-class representation learning and the shocks of new target knowledge to old ones. To address them, we propose the Multi-Granularity Class Prototype Topology Distillation (GROTO) algorithm, which effectively transfers the source knowledge to the class-incremental target domain. Concretely, we design the multi-granularity class prototype self-organization module and the prototype topology distillation module. First, we mine the positive classes by modeling accumulation distributions. Next, we introduce multi-granularity class prototypes to generate reliable pseudo-labels, and exploit them to promote the positive-class target feature self-organization. Second, the positive-class prototypes are leveraged to construct the topological structures of source and target feature spaces. 
Then, we perform the topology distillation to continually mitigate the shocks of new target knowledge to old ones. Extensive experiments demonstrate that our proposed method achieves state-of-the-art performance on three public datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16064v3-abstract-full').style.display = 'none'; document.getElementById('2411.16064v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 March, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by CVPR 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10742">arXiv:2411.10742</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10742">pdf</a>, <a href="https://arxiv.org/format/2411.10742">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> It Takes Two: Accurate Gait Recognition in the Wild via Cross-granularity Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+J">Jinkai Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xinchen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+B">Boyue Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenggang Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jiyong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Wu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yongdong Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10742v1-abstract-short" style="display: inline;"> Existing studies for gait recognition primarily utilized sequences of either binary silhouette or human parsing to encode the shapes and dynamics of persons during walking. Silhouettes exhibit accurate segmentation quality and robustness to environmental variations, but their low information entropy may result in sub-optimal performance. In contrast, human parsing provides fine-grained part segmen&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10742v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10742v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10742v1-abstract-full" style="display: none;"> Existing studies for gait recognition primarily utilized sequences of either binary silhouette or human parsing to encode the shapes and dynamics of persons during walking. Silhouettes exhibit accurate segmentation quality and robustness to environmental variations, but their low information entropy may result in sub-optimal performance. 
In contrast, human parsing provides fine-grained part segmentation with higher information entropy, but the segmentation quality may deteriorate due to the complex environments. To discover the advantages of silhouette and parsing and overcome their limitations, this paper proposes a novel cross-granularity alignment gait recognition method, named XGait, to unleash the power of gait representations of different granularity. To achieve this goal, the XGait first contains two branches of backbone encoders to map the silhouette sequences and the parsing sequences into two latent spaces, respectively. Moreover, to explore the complementary knowledge across the features of two representations, we design the Global Cross-granularity Module (GCM) and the Part Cross-granularity Module (PCM) after the two encoders. In particular, the GCM aims to enhance the quality of parsing features by leveraging global features from silhouettes, while the PCM aligns the dynamics of human parts between silhouette and parsing features using the high information entropy in parsing sequences. In addition, to effectively guide the alignment of two representations with different granularity at the part level, an elaborate-designed learnable division mechanism is proposed for the parsing features. Comprehensive experiments on two large-scale gait datasets not only show the superior performance of XGait with the Rank-1 accuracy of 80.5% on Gait3D and 88.3% CCPG but also reflect the robustness of the learned features even under challenging conditions like occlusions and cloth changes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10742v1-abstract-full').style.display = 'none'; document.getElementById('2411.10742v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 9 figures; Accepted by ACM MM 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07588">arXiv:2411.07588</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07588">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> A High-frequency Pneumatic Oscillator for Soft Robotics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Longchuan Li</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+S">Shuqian He</a>, <a href="/search/cs?searchtype=author&amp;query=Qi%2C+Q">Qiukai Qi</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+Y">Ye Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Cong Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+K">Kaige Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Kang%2C+S">Shuai Kang</a>, <a href="/search/cs?searchtype=author&amp;query=Tokuda%2C+I+T">Isao T. 
Tokuda</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhongkui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+S">Shugen Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Huaping Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07588v1-abstract-short" style="display: inline;"> Soft robots, while highly adaptable to diverse environments through various actuation methods, still face significant performance boundary due to the inherent properties of materials. These limitations manifest in the challenge of guaranteeing rapid response and large-scale movements simultaneously, ultimately restricting the robots&#39; absolute speed and overall efficiency. In this paper, we introdu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07588v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07588v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07588v1-abstract-full" style="display: none;"> Soft robots, while highly adaptable to diverse environments through various actuation methods, still face significant performance boundary due to the inherent properties of materials. These limitations manifest in the challenge of guaranteeing rapid response and large-scale movements simultaneously, ultimately restricting the robots&#39; absolute speed and overall efficiency. In this paper, we introduce a high-frequency pneumatic oscillator (HIPO) to overcome these challenges. Through a collision-induced phase resetting mechanism, our HIPO leverages event-based nonlinearity to trigger self-oscillation of pneumatic actuator, which positively utilizes intrinsic characteristics of materials. This enables the system to spontaneously generate periodic control signals and directly produce motion responses, eliminating the need for incorporating external actuation components. By efficiently and rapidly converting internal energy of airflow into the kinetic energy of robots, HIPO achieves a frequency of up to 20 Hz. Furthermore, we demonstrate the versatility and high-performance capabilities of HIPO through bio-inspired robots: an insect-like fast-crawler (with speeds up to 50.27 cm/s), a high-frequency butterfly-like wing-flapper, and a maneuverable duck-like swimmer. By eliminating external components and seamlessly fusing signal generation, energy conversion, and motion output, HIPO unleashes rapid and efficient motion, unlocking potential for high-performance soft robotics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07588v1-abstract-full').style.display = 'none'; document.getElementById('2411.07588v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07446">arXiv:2411.07446</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07446">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Efficient and Accurate Prompt Optimization: the Benefit of Memory in Exemplar-Guided Reflection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Cilin Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jingyun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+R">Ruihui Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+X">Xiaopu Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Xiong%2C+K">Kai Xiong</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Q">Qingsong Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Kang%2C+G">Guoliang Kang</a>, <a href="/search/cs?searchtype=author&amp;query=Kang%2C+Y">Yangyang Kang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07446v1-abstract-short" style="display: inline;"> Automatic prompt engineering aims to enhance the generation quality of large language models (LLMs). Recent works utilize feedbacks generated from erroneous cases to guide the prompt optimization. During inference, they may further retrieve several semantically-related exemplars and concatenate them to the optimized prompts to improve the performance. However, those works only utilize the feedback&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07446v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07446v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07446v1-abstract-full" style="display: none;"> Automatic prompt engineering aims to enhance the generation quality of large language models (LLMs). Recent works utilize feedbacks generated from erroneous cases to guide the prompt optimization. During inference, they may further retrieve several semantically-related exemplars and concatenate them to the optimized prompts to improve the performance. However, those works only utilize the feedback at the current step, ignoring historical and unseleccted feedbacks which are potentially beneficial. Moreover, the selection of exemplars only considers the general semantic relationship and may not be optimal in terms of task performance and matching with the optimized prompt. In this work, we propose an Exemplar-Guided Reflection with Memory mechanism (ERM) to realize more efficient and accurate prompt optimization. Specifically, we design an exemplar-guided reflection mechanism where the feedback generation is additionally guided by the generated exemplars. We further build two kinds of memory to fully utilize the historical feedback information and support more effective exemplar retrieval. 
Empirical evaluations show our method surpasses previous state-of-the-art methods with fewer optimization steps, i.e., improving the F1 score by 10.1 on the LIAR dataset, and reducing the optimization steps by half compared to ProTeGi. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07446v1-abstract-full').style.display = 'none'; document.getElementById('2411.07446v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.23159">arXiv:2410.23159</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.23159">pdf</a>, <a href="https://arxiv.org/format/2410.23159">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Fourier Amplitude and Correlation Loss: Beyond Using L2 Loss for Skillful Precipitation Nowcasting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chiu-Wai Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Foo%2C+S+Q">Shi Quan Foo</a>, <a href="/search/cs?searchtype=author&amp;query=Trinh%2C+V+H">Van Hoan Trinh</a>, <a href="/search/cs?searchtype=author&amp;query=Yeung%2C+D">Dit-Yan Yeung</a>, <a href="/search/cs?searchtype=author&amp;query=Wong%2C+K">Ka-Hing Wong</a>, <a href="/search/cs?searchtype=author&amp;query=Wong%2C+W">Wai-Kin Wong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.23159v1-abstract-short" style="display: inline;"> Deep learning approaches have been widely adopted for precipitation nowcasting in recent years. Previous studies mainly focus on proposing new model architectures to improve pixel-wise metrics. However, they frequently result in blurry predictions which provide limited utility to forecasting operations. In this work, we propose a new Fourier Amplitude and Correlation Loss (FACL) which consists of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23159v1-abstract-full').style.display = 'inline'; document.getElementById('2410.23159v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.23159v1-abstract-full" style="display: none;"> Deep learning approaches have been widely adopted for precipitation nowcasting in recent years. Previous studies mainly focus on proposing new model architectures to improve pixel-wise metrics. However, they frequently result in blurry predictions which provide limited utility to forecasting operations. In this work, we propose a new Fourier Amplitude and Correlation Loss (FACL) which consists of two novel loss terms: Fourier Amplitude Loss (FAL) and Fourier Correlation Loss (FCL). 
FAL regularizes the Fourier amplitude of the model prediction and FCL complements the missing phase information. The two loss terms work together to replace the traditional $L_2$ losses such as MSE and weighted MSE for the spatiotemporal prediction problem on signal-based data. Our method is generic, parameter-free and efficient. Extensive experiments using one synthetic dataset and three radar echo datasets demonstrate that our method improves perceptual metrics and meteorology skill scores, with a small trade-off to pixel-wise accuracy and structural similarity. Moreover, to improve the error margin in meteorological skill scores such as Critical Success Index (CSI) and Fractions Skill Score (FSS), we propose and adopt the Regional Histogram Divergence (RHD), a distance metric that considers the patch-wise similarity between signal-based imagery patterns with tolerance to local transforms. Code is available at https://github.com/argenycw/FACL <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23159v1-abstract-full').style.display = 'none'; document.getElementById('2410.23159v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by NeurIPS 2024. Camera-ready submission</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15003">arXiv:2410.15003</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.15003">pdf</a>, <a href="https://arxiv.org/format/2410.15003">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> Achieving O(1/N) Optimality Gap in Restless Bandits through Diffusion Approximation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chen Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Weina Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ying%2C+L">Lei Ying</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15003v1-abstract-short" style="display: inline;"> We study the finite horizon Restless Multi-Armed Bandit (RMAB) problem with $N$ homogeneous arms, focusing on the challenges posed by degenerate RMABs, which are prevalent in practical applications. 
While previous work has shown that Linear Programming (LP)-based policies achieve exponentially fast convergence relative to the LP upper bound in non-degenerate models, applying these LP-based policie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15003v1-abstract-full').style.display = 'inline'; document.getElementById('2410.15003v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15003v1-abstract-full" style="display: none;"> We study the finite horizon Restless Multi-Armed Bandit (RMAB) problem with $N$ homogeneous arms, focusing on the challenges posed by degenerate RMABs, which are prevalent in practical applications. While previous work has shown that Linear Programming (LP)-based policies achieve exponentially fast convergence relative to the LP upper bound in non-degenerate models, applying these LP-based policies to degenerate RMABs results in slower convergence rates of $O(1/\sqrt{N})$. We construct a diffusion system that incorporates both the mean and variance of the stochastic processes, in contrast to the fluid system from the LP, which only accounts for the mean, thereby providing a more accurate representation of RMAB dynamics. Consequently, our novel diffusion-resolving policy achieves an optimality gap of $O(1/N)$ relative to the true optimal value, rather than the LP upper bound, revealing that the fluid approximation and the LP upper bound are too loose in degenerate settings. These insights pave the way for constructing policies that surpass the $O(1/\sqrt{N})$ optimality gap for any RMAB, whether degenerate or not. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15003v1-abstract-full').style.display = 'none'; document.getElementById('2410.15003v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">31 pages, 6 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 90C15; 90C25; 90C31; 90B15; 90B05 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.13454">arXiv:2410.13454</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.13454">pdf</a>, <a href="https://arxiv.org/ps/2410.13454">ps</a>, <a href="https://arxiv.org/format/2410.13454">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Byzantine-Resilient Output Optimization of Multiagent via Self-Triggered Hybrid Detection Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenhang Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+L">Liping Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Lv%2C+Y">Yuezu Lv</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+B">Bolei Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+Y">Yuanqing Xia</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.13454v1-abstract-short" style="display: inline;"> How to achieve precise distributed optimization despite unknown attacks, especially the Byzantine attacks, is one of the critical challenges for multiagent systems. This paper addresses a distributed resilient optimization for linear heterogeneous multi-agent systems faced with adversarial threats. We establish a framework aimed at realizing resilient optimization for continuous-time systems by in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.13454v1-abstract-full').style.display = 'inline'; document.getElementById('2410.13454v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.13454v1-abstract-full" style="display: none;"> How to achieve precise distributed optimization despite unknown attacks, especially the Byzantine attacks, is one of the critical challenges for multiagent systems. This paper addresses a distributed resilient optimization for linear heterogeneous multi-agent systems faced with adversarial threats. We establish a framework aimed at realizing resilient optimization for continuous-time systems by incorporating a novel self-triggered hybrid detection approach. The proposed hybrid detection approach is able to identify attacks on neighbors using both error thresholds and triggering intervals, thereby optimizing the balance between effective attack detection and the reduction of excessive communication triggers. Through using an edge-based adaptive self-triggered approach, each agent can receive its neighbors&#39; information and determine whether these information is valid. If any neighbor prove invalid, each normal agent will isolate that neighbor by disconnecting communication along that specific edge. 
Importantly, our adaptive algorithm guarantees the accuracy of the optimization solution even when an agent is isolated by its neighbors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.13454v1-abstract-full').style.display = 'none'; document.getElementById('2410.13454v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.08320">arXiv:2410.08320</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.08320">pdf</a>, <a href="https://arxiv.org/format/2410.08320">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Do You Know What You Are Talking About? Characterizing Query-Knowledge Relevance For Reliable Retrieval Augmented Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhuohang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jiaxin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chao Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Das%2C+K">Kamalika Das</a>, <a href="/search/cs?searchtype=author&amp;query=Kumar%2C+S">Sricharan Kumar</a>, <a href="/search/cs?searchtype=author&amp;query=Kantarcioglu%2C+M">Murat Kantarcioglu</a>, <a href="/search/cs?searchtype=author&amp;query=Malin%2C+B+A">Bradley A. Malin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.08320v1-abstract-short" style="display: inline;"> Language models (LMs) are known to suffer from hallucinations and misinformation. Retrieval augmented generation (RAG) that retrieves verifiable information from an external knowledge corpus to complement the parametric knowledge in LMs provides a tangible solution to these problems. However, the generation quality of RAG is highly dependent on the relevance between a user&#39;s query and the retrieve&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.08320v1-abstract-full').style.display = 'inline'; document.getElementById('2410.08320v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.08320v1-abstract-full" style="display: none;"> Language models (LMs) are known to suffer from hallucinations and misinformation. Retrieval augmented generation (RAG) that retrieves verifiable information from an external knowledge corpus to complement the parametric knowledge in LMs provides a tangible solution to these problems. However, the generation quality of RAG is highly dependent on the relevance between a user&#39;s query and the retrieved documents. Inaccurate responses may be generated when the query is outside of the scope of knowledge represented in the external knowledge corpus or if the information in the corpus is out-of-date. 
In this work, we establish a statistical framework that assesses how well a query can be answered by an RAG system by capturing the relevance of knowledge. We introduce an online testing procedure that employs goodness-of-fit (GoF) tests to inspect the relevance of each user query to detect out-of-knowledge queries with low knowledge relevance. Additionally, we develop an offline testing framework that examines a collection of user queries, aiming to detect significant shifts in the query distribution which indicates the knowledge corpus is no longer sufficiently capable of supporting the interests of the users. We demonstrate the capabilities of these strategies through a systematic evaluation on eight question-answering (QA) datasets, the results of which indicate that the new testing framework is an efficient solution to enhance the reliability of existing RAG systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.08320v1-abstract-full').style.display = 'none'; document.getElementById('2410.08320v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.01107">arXiv:2410.01107</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.01107">pdf</a>, <a href="https://arxiv.org/format/2410.01107">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Count of Monte Crypto: Accounting-based Defenses for Cross-Chain Bridges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+E">Enze Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+E">Elisa Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+J+C">Jian Chen Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Izhikevich%2C+K">Katherine Izhikevich</a>, <a href="/search/cs?searchtype=author&amp;query=Grant%2C+S">Stewart Grant</a>, <a href="/search/cs?searchtype=author&amp;query=Stefan%2C+D">Deian Stefan</a>, <a href="/search/cs?searchtype=author&amp;query=Voelker%2C+G+M">Geoffrey M Voelker</a>, <a href="/search/cs?searchtype=author&amp;query=Savage%2C+S">Stefan Savage</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.01107v2-abstract-short" style="display: inline;"> Between 2021 and 2023, crypto assets valued at over \$US2.6 billion were stolen via attacks on &#34;bridges&#34; -- decentralized services designed to allow inter-blockchain exchange. While the individual exploits in each attack vary, a single design flaw underlies them all: the lack of end-to-end value accounting in cross-chain transactions. 
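<p class="is-size-7">The design flaw noted above, missing end-to-end value accounting, suggests a simple per-asset balance check; a minimal sketch is shown below. The record format (asset, direction, amount) and the fee tolerance are illustrative assumptions, not the paper's actual data model or detection logic.</p> <pre>
# Sketch of an inflow/outflow accounting invariant: for each asset, cumulative
# outflow released by a bridge should never exceed cumulative inflow locked
# with it (up to a tolerance). Inputs and tolerance are assumptions.
from collections import defaultdict

def find_violations(transfers, tolerance=0.0):
    """transfers: iterable of (asset, direction, amount), direction in {'in', 'out'}."""
    balance = defaultdict(float)
    violations = []
    for asset, direction, amount in transfers:
        balance[asset] += amount if direction == 'in' else -amount
        if -balance[asset] > tolerance:
            violations.append((asset, balance[asset]))
    return violations

# Example: 10 units locked in, 250 units paid out -- the imbalance is flagged.
print(find_violations([("ETH", "in", 10.0), ("ETH", "out", 250.0)]))
</pre>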
In this paper, we empirically analyze 10 million transactions u&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.01107v2-abstract-full').style.display = 'inline'; document.getElementById('2410.01107v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.01107v2-abstract-full" style="display: none;"> Between 2021 and 2023, crypto assets valued at over \$US2.6 billion were stolen via attacks on &#34;bridges&#34; -- decentralized services designed to allow inter-blockchain exchange. While the individual exploits in each attack vary, a single design flaw underlies them all: the lack of end-to-end value accounting in cross-chain transactions. In this paper, we empirically analyze 10 million transactions used by key bridges during this period. We show that a simple invariant that balances cross-chain inflows and outflows is compatible with legitimate use, yet precisely identifies every known attack (and several likely attacks) in this data. Further, we show that this approach is not only sufficient for post-hoc audits, but can be implemented in-line in existing bridge designs to provide generic protection against a broad array of bridge vulnerabilities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.01107v2-abstract-full').style.display = 'none'; document.getElementById('2410.01107v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Currently under submission</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.20111">arXiv:2409.20111</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.20111">pdf</a>, <a href="https://arxiv.org/format/2409.20111">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Robust Gaussian Splatting SLAM by Leveraging Loop Closure </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+Z">Zunjie Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+Y">Youxu Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xin Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chengang Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+F">Feng Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Yuen%2C+C">Chau Yuen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yanyan Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.20111v1-abstract-short" style="display: inline;"> 3D Gaussian Splatting algorithms excel in novel view rendering applications and have been adapted to extend the capabilities of traditional SLAM systems. 
However, current Gaussian Splatting SLAM methods, designed mainly for hand-held RGB or RGB-D sensors, struggle with tracking drifts when used with rotating RGB-D camera setups. In this paper, we propose a robust Gaussian Splatting SLAM architectu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20111v1-abstract-full').style.display = 'inline'; document.getElementById('2409.20111v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.20111v1-abstract-full" style="display: none;"> 3D Gaussian Splatting algorithms excel in novel view rendering applications and have been adapted to extend the capabilities of traditional SLAM systems. However, current Gaussian Splatting SLAM methods, designed mainly for hand-held RGB or RGB-D sensors, struggle with tracking drifts when used with rotating RGB-D camera setups. In this paper, we propose a robust Gaussian Splatting SLAM architecture that utilizes inputs from rotating multiple RGB-D cameras to achieve accurate localization and photorealistic rendering performance. The carefully designed Gaussian Splatting Loop Closure module effectively addresses the issue of accumulated tracking and mapping errors found in conventional Gaussian Splatting SLAM systems. First, each Gaussian is associated with an anchor frame and categorized as historical or novel based on its timestamp. By rendering different types of Gaussians at the same viewpoint, the proposed loop detection strategy considers both co-visibility relationships and distinct rendering outcomes. Furthermore, a loop closure optimization approach is proposed to remove camera pose drift and maintain the high quality of 3D Gaussian models. The approach uses a lightweight pose graph optimization algorithm to correct pose drift and updates Gaussians based on the optimized poses. Additionally, a bundle adjustment scheme further refines camera poses using photometric and geometric constraints, ultimately enhancing the global consistency of scenarios. Quantitative and qualitative evaluations on both synthetic and real-world datasets demonstrate that our method outperforms state-of-the-art methods in camera pose estimation and novel view rendering tasks. The code will be open-sourced for the community. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20111v1-abstract-full').style.display = 'none'; document.getElementById('2409.20111v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.17907">arXiv:2409.17907</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.17907">pdf</a>, <a href="https://arxiv.org/format/2409.17907">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.14722/ndss.2025.23997">10.14722/ndss.2025.23997 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> PhantomLiDAR: Cross-modality Signal Injection Attacks against LiDAR </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jin%2C+Z">Zizhi Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Q">Qinhong Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+X">Xuancun Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chen Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+X">Xiaoyu Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+W">Wenyuan Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.17907v1-abstract-short" style="display: inline;"> LiDAR (Light Detection and Ranging) is a pivotal sensor for autonomous driving, offering precise 3D spatial information. Previous signal attacks against LiDAR systems mainly exploit laser signals. In this paper, we investigate the possibility of cross-modality signal injection attacks, i.e., injecting intentional electromagnetic interference (IEMI) to manipulate LiDAR output. Our insight is that t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.17907v1-abstract-full').style.display = 'inline'; document.getElementById('2409.17907v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.17907v1-abstract-full" style="display: none;"> LiDAR (Light Detection and Ranging) is a pivotal sensor for autonomous driving, offering precise 3D spatial information. Previous signal attacks against LiDAR systems mainly exploit laser signals. In this paper, we investigate the possibility of cross-modality signal injection attacks, i.e., injecting intentional electromagnetic interference (IEMI) to manipulate LiDAR output. Our insight is that the internal modules of a LiDAR, i.e., the laser receiving circuit, the monitoring sensors, and the beam-steering modules, even with strict electromagnetic compatibility (EMC) testing, can still couple with the IEMI attack signals and result in the malfunction of LiDAR systems. 
Based on the above attack surfaces, we propose the PhantomLiDAR attack, which manipulates LiDAR output in terms of Points Interference, Points Injection, Points Removal, and even LiDAR Power-Off. We evaluate and demonstrate the effectiveness of PhantomLiDAR with both simulated and real-world experiments on five COTS LiDAR systems. We also conduct feasibility experiments in real-world moving scenarios. We provide potential defense measures that can be implemented at both the sensor level and the vehicle system level to mitigate the risks associated with IEMI attacks. Video demonstrations can be viewed at https://sites.google.com/view/phantomlidar. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.17907v1-abstract-full').style.display = 'none'; document.getElementById('2409.17907v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.17873">arXiv:2409.17873</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.17873">pdf</a>, <a href="https://arxiv.org/format/2409.17873">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.14722/ndss.2025.23691">10.14722/ndss.2025.23691 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> ReThink: Reveal the Threat of Electromagnetic Interference on Power Inverters </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+F">Fengchen Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Dan%2C+Z">Zihao Dan</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+K">Kaikai Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chen Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+X">Xiaoyu Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+W">Wenyuan Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.17873v1-abstract-short" style="display: inline;"> With the boom of renewable energy sources (RES), the number of power inverters proliferates. Power inverters are the key electronic devices that transform the direct current (DC) power from RES to the alternating current (AC) power on the grids, and their security can affect the stable operation of RES and even power grids. 
This paper analyzes the security of photovoltaic (PV) inverters from the a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.17873v1-abstract-full').style.display = 'inline'; document.getElementById('2409.17873v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.17873v1-abstract-full" style="display: none;"> With the boom of renewable energy sources (RES), the number of power inverters proliferates. Power inverters are the key electronic devices that transform the direct current (DC) power from RES to the alternating current (AC) power on the grids, and their security can affect the stable operation of RES and even power grids. This paper analyzes the security of photovoltaic (PV) inverters from the aspects of internal sensors since they serve as the foundation for safe power conversion. We discover that both the embedded current sensors and voltage sensors are vulnerable to electromagnetic interference (EMI) of 1 GHz or higher, despite electromagnetic compatibility (EMC) countermeasures. Such vulnerabilities can lead to incorrect measurements and deceiving the control algorithms, and we design ReThink that could produce three types of consequences on PV inverters by emitting carefully crafted EMI, i.e., Denial of Service (DoS), damaging inverters physically or damping the power output. We successfully validate these consequences on 5 off-the-shelf PV inverters, and even in a real-world microgrid, by transmitting EMI signals at a distance of 100-150cm and a total power within 20W. Our work aims to raise awareness of the security of power electronic devices of RES, as they represent an emerging Cyber-Physical attack surface to the future RES-dominated grid. Finally, to cope with such threats, we provide hardware and software-based countermeasures. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.17873v1-abstract-full').style.display = 'none'; document.getElementById('2409.17873v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by NDSS Symposium 2025. Please cite this paper as &#34;Fengchen Yang, Zihao Dan, Kaikai Pan, Chen Yan, Xiaoyu Ji, Wenyuan Xu. ReThink: Reveal the Threat of Electromagnetic Interference on Power Inverters. 
In the Network and Distributed System Security Symposium 2025 (NDSS 2025).&#34;</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16546">arXiv:2409.16546</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.16546">pdf</a>, <a href="https://arxiv.org/format/2409.16546">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> AlignedKV: Reducing Memory Access of KV-Cache with Precision-Aligned Quantization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tan%2C+Y">Yifan Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoze Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chao Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+Y">Yangdong Deng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16546v2-abstract-short" style="display: inline;"> Model quantization has become a crucial technique to address the issues of large memory consumption and long inference times associated with LLMs. Mixed-precision quantization, which distinguishes between important and unimportant parameters, stands out among numerous quantization schemes as it achieves a balance between precision and compression rate. However, existing approaches can only identif&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16546v2-abstract-full').style.display = 'inline'; document.getElementById('2409.16546v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16546v2-abstract-full" style="display: none;"> Model quantization has become a crucial technique to address the issues of large memory consumption and long inference times associated with LLMs. Mixed-precision quantization, which distinguishes between important and unimportant parameters, stands out among numerous quantization schemes as it achieves a balance between precision and compression rate. However, existing approaches can only identify important parameters through qualitative analysis and manual experiments without quantitatively analyzing how their importance is determined. We propose a new criterion, so-called &#39;precision alignment&#39;, to build a quantitative framework to holistically evaluate the importance of parameters in mixed-precision quantization. Our observations on floating point addition under various real-world scenarios suggest that two addends should have identical precision, otherwise the information in the higher-precision number will be wasted. Such an observation offers an essential principle to determine the precision of each parameter in matrix multiplication operation. As the first step towards applying the above discovery to large model inference, we develop a dynamic KV-Cache quantization technique to effectively reduce memory access latency. Different from existing quantization approaches that focus on memory saving, this work directly aims to accelerate LLM inference through quantifying floating numbers. 
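<p class="is-size-7">The "precision alignment" observation described above can be illustrated with a small numeric example (not the paper's code): when two addends differ greatly in magnitude, the extra precision carried by the smaller one is lost in the sum, so it can be stored at lower precision without changing the result.</p> <pre>
# Illustration of the precision-alignment observation, assuming NumPy is
# available; the specific values are arbitrary.
import numpy as np

big = np.float32(2048.0)                   # magnitude ~2^11, so ulp is ~2^-12
small_fp32 = np.float32(1e-4)              # kept at full float32 precision
small_fp16 = np.float32(np.float16(1e-4))  # pre-quantized to float16

# Both sums round to exactly 2048.0: the extra float32 precision of the
# small addend is wasted once it is added to the much larger value.
assert big + small_fp32 == np.float32(2048.0)
assert big + small_fp16 == np.float32(2048.0)
</pre>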
The proposed technique attains a 25% saving in memory access and delivers up to a 1.3x speedup in the computation of attention in the decoding phase of LLMs, with almost no loss of precision.
Submitted 21 October, 2024; v1 submitted 24 September, 2024; originally announced September 2024.
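As a minimal numerical illustration of the precision-alignment observation above (toy values of ours, not the paper's experiments): when two addends differ greatly in magnitude, the extra mantissa bits of the smaller one are rounded away by the addition anyway, so storing it at reduced precision costs essentially nothing.

```python
import numpy as np

big   = np.float32(1024.0)        # e.g. a large partial sum in attention
small = np.float32(1.234567e-4)   # e.g. a much smaller KV-cache value

exact      = float(big) + float(small)        # float64 reference
fp32_sum   = np.float32(big + small)          # both addends kept at fp32
small_fp16 = np.float32(np.float16(small))    # aggressively quantized addend
mixed_sum  = np.float32(big + small_fp16)

print("error with fp32 addend:", abs(float(fp32_sum) - exact))
print("error with fp16 addend:", abs(float(mixed_sum) - exact))
# The two errors are essentially identical: the exponent gap between the
# addends, not the precision of `small`, limits the accuracy of the sum.
```

This is the kind of per-addend reasoning the precision-alignment criterion formalizes when deciding how many bits each KV-cache entry actually needs in the attention matrix multiplication.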

arXiv:2409.15636 [pdf, other] (https://arxiv.org/abs/2409.15636), cs.LG, cs.AI, cs.CR, cs.CV
Personalized Federated Learning via Backbone Self-Distillation
Authors: Pengju Wang, Bochao Liu, Dan Zeng, Chenggang Yan, Shiming Ge
Abstract: In practical scenarios, federated learning frequently necessitates training personalized models for each client on heterogeneous data. This paper proposes a backbone self-distillation approach to facilitate personalized federated learning. In this approach, each client trains its local model and sends only the backbone weights to the server. These weights are then aggregated to create a global backbone, which is returned to each client for updating. However, the client's local backbone lacks personalization because of the common representation. To solve this problem, each client further performs backbone self-distillation, using the global backbone as a teacher and transferring its knowledge to update the local backbone. This process involves learning two components: a shared backbone for the common representation and a private head for local personalization, which enables effective global knowledge transfer. Extensive experiments and comparisons with 12 state-of-the-art approaches demonstrate the effectiveness of our approach.
Submitted 23 September, 2024; originally announced September 2024.
Comments: Published in ACM MMAsia 2023.
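The sketch below illustrates the shared-backbone/private-head split and the self-distillation step described above; the tiny linear modules, the FedAvg-style averaging, and the MSE distillation loss are assumptions made for this example, not the authors' implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Minimal sketch: clients share only backbone weights; the aggregated global
# backbone then acts as a teacher for local self-distillation.

class Client(nn.Module):
    def __init__(self, dim=16, classes=4):
        super().__init__()
        self.backbone = nn.Linear(dim, 8)     # shared: common representation
        self.head = nn.Linear(8, classes)     # private: local personalization
    def forward(self, x):
        return self.head(F.relu(self.backbone(x)))

def aggregate(backbone_states):
    """FedAvg-style mean over the backbone weights only."""
    return {k: torch.stack([s[k] for s in backbone_states]).mean(0)
            for k in backbone_states[0]}

def distill_loss(client, global_state, x, alpha=0.5):
    teacher = nn.Linear(16, 8)
    teacher.load_state_dict(global_state)
    with torch.no_grad():
        target = teacher(x)                   # global backbone as teacher
    return alpha * F.mse_loss(client.backbone(x), target)

clients = [Client() for _ in range(3)]
global_state = aggregate([c.backbone.state_dict() for c in clients])
loss = distill_loss(clients[0], global_state, torch.randn(5, 16))
loss.backward()                               # added to the usual task loss in practice
```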

arXiv:2409.13551 [pdf, other] (https://arxiv.org/abs/2409.13551), cs.SE, cs.CL, cs.DB, doi:10.1145/3691620.3695503
Contextualized Data-Wrangling Code Generation in Computational Notebooks
Authors: Junjie Huang, Daya Guo, Chenglong Wang, Jiazhen Gu, Shuai Lu, Jeevana Priya Inala, Cong Yan, Jianfeng Gao, Nan Duan, Michael R. Lyu
Abstract: Data wrangling, the process of preparing raw data for further analysis in computational notebooks, is a crucial yet time-consuming step in data science. Code generation has the potential to automate data wrangling and reduce analysts' overhead by translating user intents into executable code. Precisely generating data-wrangling code requires a comprehensive consideration of the rich context present in notebooks, including the textual context, the code context, and the data context. However, notebooks often interleave multiple non-linear analysis tasks into a linear sequence of code blocks, where the contextual dependencies are not clearly reflected. Directly training models on source code blocks fails to fully exploit these contexts for accurate wrangling-code generation. To bridge this gap, we construct a high-quality dataset with clear and rich contexts for training models on data-wrangling code generation. We first propose an automated approach, CoCoMine, to mine data-wrangling code-generation examples with clear multi-modal contextual dependencies. CoCoMine adopts data-flow analysis to identify the code blocks containing data-wrangling code, and then extracts contextualized examples by tracing and replaying notebooks. With CoCoMine, we construct CoCoNote, a dataset of 58,221 examples for contextualized data-wrangling code generation in notebooks. To demonstrate the effectiveness of the dataset, we fine-tune a range of pretrained code models and prompt various large language models on our task. Furthermore, we propose DataCoder, which encodes the data context separately from the code and textual contexts to enhance code generation. Experimental results demonstrate the significance of incorporating data context in data-wrangling code generation and the effectiveness of our model. We release our code and data.
Submitted 20 September, 2024; originally announced September 2024.
Comments: To appear at ASE 2024.
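To make "textual, code, and data context" concrete, here is a small hypothetical sketch of bundling all three into a single prompt for a code-generation model; `build_prompt` and the sample dataframe are stand-ins of ours, not CoCoMine or DataCoder.

```python
import pandas as pd

def build_prompt(intent: str, code_context: str, df: pd.DataFrame) -> str:
    """Combine textual intent, prior notebook code, and a peek at the live data."""
    data_context = df.head(3).to_csv(index=False)   # schema plus a few rows
    return ("# Prior notebook code\n" + code_context + "\n"
            "# Data sample\n" + data_context +
            "# Task\n" + intent + "\n"
            "# Next pandas cell:")

df = pd.DataFrame({"price": ["$5", "$7"], "qty": [2, 3]})
prompt = build_prompt(
    intent="Strip the '$' from price and add a revenue = price * qty column.",
    code_context="df = pd.read_csv('sales.csv')",
    df=df,
)
print(prompt)   # this string would then be sent to a code LLM (not shown here)
```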
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear at ASE 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13398">arXiv:2409.13398</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.13398">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Unsourced Sparse Multiple Access foUnsourced Sparse Multiple Access for 6G Massive Communicationr 6G Massive Communication </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+Y">Yifei Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Y">Yuhong Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chunlin Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Sen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+S">Shuai Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+X">Xiaodong Shen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13398v2-abstract-short" style="display: inline;"> Massive communication is one of key scenarios of 6G where two magnitude higher connection density would be required to serve diverse services. As a promising direction, unsourced multiple access has been proved to outperform significantly over orthogonal multiple access (OMA) or slotted-ALOHA in massive connections. In this paper we describe a design framework of unsourced sparse multiple access (&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13398v2-abstract-full').style.display = 'inline'; document.getElementById('2409.13398v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13398v2-abstract-full" style="display: none;"> Massive communication is one of key scenarios of 6G where two magnitude higher connection density would be required to serve diverse services. As a promising direction, unsourced multiple access has been proved to outperform significantly over orthogonal multiple access (OMA) or slotted-ALOHA in massive connections. In this paper we describe a design framework of unsourced sparse multiple access (USMA) that consists of two key modules: compressed sensing for preamble generation, and sparse interleaver division multiple access (SIDMA) for main packet transmission. Simulation results of general design of USMA show that the theoretical bound can be approached within 1~1.5 dB by using simple channel codes like convolutional. To illustrate the scalability of USMA, a customized design for ambient Internet of Things (A-IoT) is proposed, so that much less memory and computation are required. Simulations results of Rayleigh fading and realistic channel estimation show that USMA based A-IoT solution can deliver nearly 4 times capacity and 6 times efficiency for random access over traditional radio frequency identification (RFID) technology. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13398v2-abstract-full').style.display = 'none'; document.getElementById('2409.13398v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 5 figures and 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.10411">arXiv:2409.10411</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.10411">pdf</a>, <a href="https://arxiv.org/format/2409.10411">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> A Large-Scale Privacy Assessment of Android Third-Party SDKs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Meng%2C+M+H">Mark Huasong Meng</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chuan Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Hao%2C+Y">Yun Hao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qing Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zeyu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+K">Kailong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Teo%2C+S+G">Sin Gee Teo</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+G">Guangdong Bai</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+J+S">Jin Song Dong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.10411v1-abstract-short" style="display: inline;"> Third-party Software Development Kits (SDKs) are widely adopted in Android app development, to effortlessly accelerate development pipelines and enhance app functionality. However, this convenience raises substantial concerns about unauthorized access to users&#39; privacy-sensitive information, which could be further abused for illegitimate purposes like user tracking or monetization. Our study offer&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.10411v1-abstract-full').style.display = 'inline'; document.getElementById('2409.10411v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.10411v1-abstract-full" style="display: none;"> Third-party Software Development Kits (SDKs) are widely adopted in Android app development, to effortlessly accelerate development pipelines and enhance app functionality. 
However, this convenience raises substantial concerns about unauthorized access to users' privacy-sensitive information, which could be further abused for illegitimate purposes such as user tracking or monetization. Our study offers a targeted analysis of user privacy protection among Android third-party SDKs, filling a critical gap in the Android software supply chain. It focuses on two aspects of their privacy practices, data exfiltration and behavior-policy compliance (privacy compliance), using taint analysis and large language models. It covers 158 widely used SDKs from two key SDK release platforms, the official one and a large alternative one. From these, we identified 338 instances of privacy data exfiltration. On privacy compliance, our study reveals that more than 30% of the examined SDKs fail to provide a privacy policy disclosing their data handling practices. Among those that do provide privacy policies, 37% over-collect user data and 88% falsely claim access to sensitive data. We revisited the latest versions of the SDKs after 12 months; our analysis demonstrates a persistent lack of improvement in these concerning trends. Based on our findings, we propose three actionable recommendations to mitigate the privacy leakage risks and enhance privacy protection for Android users. Our research not only serves as an urgent call for industry attention but also provides crucial insights for future regulatory interventions.
Submitted 16 September, 2024; originally announced September 2024.
Comments: 16 pages.
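At its core, the compliance check contrasts what an SDK is observed to do with what its policy declares; the toy set comparison below uses hypothetical data categories and one plausible reading of the "over-collection" and "false claim" findings, not the paper's actual pipeline.

```python
# Hypothetical inputs: in the study, observed flows come from taint analysis
# and declared items from LLM-assisted privacy-policy parsing.
observed_flows  = {"device_id", "location", "contacts"}
policy_declared = {"device_id", "location", "call_log"}

undisclosed = observed_flows - policy_declared    # collected but never disclosed
unexercised = policy_declared - observed_flows    # claimed access never observed

print("collected but undisclosed:", undisclosed)  # {'contacts'}
print("claimed but not observed :", unexercised)  # {'call_log'}
```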
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.09272">arXiv:2409.09272</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.09272">pdf</a>, <a href="https://arxiv.org/format/2409.09272">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> SafeEar: Content Privacy-Preserving Audio Deepfake Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xinfeng Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+K">Kai Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+Y">Yifan Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chen Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+X">Xiaoyu Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+W">Wenyuan Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.09272v1-abstract-short" style="display: inline;"> Text-to-Speech (TTS) and Voice Conversion (VC) models have exhibited remarkable performance in generating realistic and natural audio. However, their dark side, audio deepfake poses a significant threat to both society and individuals. Existing countermeasures largely focus on determining the genuineness of speech based on complete original audio recordings, which however often contain private con&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09272v1-abstract-full').style.display = 'inline'; document.getElementById('2409.09272v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.09272v1-abstract-full" style="display: none;"> Text-to-Speech (TTS) and Voice Conversion (VC) models have exhibited remarkable performance in generating realistic and natural audio. However, their dark side, audio deepfake poses a significant threat to both society and individuals. Existing countermeasures largely focus on determining the genuineness of speech based on complete original audio recordings, which however often contain private content. This oversight may refrain deepfake detection from many applications, particularly in scenarios involving sensitive information like business secrets. In this paper, we propose SafeEar, a novel framework that aims to detect deepfake audios without relying on accessing the speech content within. 
Our key idea is to devise a neural audio codec as a decoupling model that separates the semantic and acoustic information in audio samples, and to use only the acoustic information (e.g., prosody and timbre) for deepfake detection. In this way, no semantic content is exposed to the detector. To overcome the challenge of identifying diverse deepfake audio without semantic clues, we enhance our deepfake detector with real-world codec augmentation. Extensive experiments on four benchmark datasets demonstrate SafeEar's effectiveness in detecting various deepfake techniques, with an equal error rate (EER) down to 2.02%. Simultaneously, it shields five-language speech content from being deciphered by both machine and human auditory analysis, demonstrated by word error rates (WERs) all above 93.93% and by our user study. Furthermore, our benchmark, constructed for anti-deepfake and anti-content-recovery evaluation, provides a basis for future research in audio privacy preservation and deepfake detection.
Submitted 13 September, 2024; originally announced September 2024.
Comments: Accepted by ACM CCS 2024. Please cite this paper as "Xinfeng Li, Kai Li, Yifan Zheng, Chen Yan, Xiaoyu Ji, Wenyuan Xu. SafeEar: Content Privacy-Preserving Audio Deepfake Detection. In Proceedings of ACM Conference on Computer and Communications Security (CCS), 2024."

arXiv:2409.08534 [pdf, other] (https://arxiv.org/abs/2409.08534), cs.AR
AnalogGym: An Open and Practical Testing Suite for Analog Circuit Synthesis
Authors: Jintao Li, Haochang Zhi, Ruiyu Lyu, Wangzhen Li, Zhaori Bi, Keren Zhu, Yanhan Zeng, Weiwei Shan, Changhao Yan, Fan Yang, Yun Li, Xuan Zeng
Abstract: Recent advances in machine learning (ML) for automating analog circuit synthesis have been significant, yet challenges remain. A critical gap is the lack of a standardized evaluation framework, compounded by the variety of process design kits (PDKs) and simulation tools and the limited variety of circuit topologies. These factors hinder direct comparisons and the validation of algorithms. To address these shortcomings, we introduce AnalogGym, an open-source testing suite designed to provide fair and comprehensive evaluations. AnalogGym includes 30 circuit topologies in five categories: sensing front ends, voltage references, low-dropout regulators, amplifiers, and phase-locked loops. It supports several technology nodes for academic and commercial applications and is compatible with commercial simulators such as Cadence Spectre and Synopsys HSPICE, as well as the open-source simulator Ngspice.
AnalogGym standardizes the assessment of ML algorithms in analog circuit synthesis and promotes reproducibility through its open datasets and detailed benchmark specifications. Its user-friendly design allows researchers to easily adapt it for robust, transparent comparisons of state-of-the-art methods, while also exposing them to real-world industrial design challenges, enhancing the practical relevance of their work. Additionally, we conduct a comprehensive comparison of various analog sizing methods on AnalogGym, highlighting the capabilities and advantages of different approaches. AnalogGym is available at https://github.com/CODA-Team/AnalogGym, with documentation at http://coda-team.github.io/AnalogGym/.
Submitted 13 September, 2024; originally announced September 2024.
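For intuition, the loop below sketches the kind of black-box sizing search such a suite is meant to benchmark; the surrogate `evaluate` function and the random-search baseline are stand-ins of ours for a netlist simulation (e.g., through Ngspice) and a real optimizer.

```python
import random

def evaluate(sizing):
    """Toy surrogate for a circuit simulation returning a scalar figure of merit."""
    w, l = sizing["w"], sizing["l"]
    gain = 40.0 * w / l - 0.5 * w
    power = 0.1 * w
    return gain - 5.0 * power          # score to maximize

best, best_score = None, float("-inf")
for _ in range(200):                   # naive random-search baseline
    cand = {"w": random.uniform(1.0, 50.0), "l": random.uniform(0.5, 5.0)}
    score = evaluate(cand)
    if score > best_score:
        best, best_score = cand, score

print("best sizing:", best, "score:", round(best_score, 1))
```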

arXiv:2409.07200 [pdf, other] (https://arxiv.org/abs/2409.07200), cs.CV, cs.AI
ThermalGaussian: Thermal 3D Gaussian Splatting
Authors: Rongfeng Lu, Hangyu Chen, Zunjie Zhu, Yuhang Qin, Ming Lu, Le Zhang, Chenggang Yan, Anke Xue
Abstract: Thermography is especially valuable for the military and other users of surveillance cameras. Some recent methods based on Neural Radiance Fields (NeRF) have been proposed to reconstruct thermal scenes in 3D from a set of thermal and RGB images. However, unlike NeRF, 3D Gaussian Splatting (3DGS) prevails due to its rapid training and real-time rendering. In this work, we propose ThermalGaussian, the first thermal 3DGS approach capable of rendering high-quality images in both the RGB and thermal modalities. We first calibrate the RGB camera and the thermal camera to ensure that the two modalities are accurately aligned. Subsequently, we use the registered images to learn multimodal 3D Gaussians. To prevent any single modality from overfitting, we introduce several multimodal regularization constraints. We also develop smoothing constraints tailored to the physical characteristics of the thermal modality. In addition, we contribute a real-world dataset named RGBT-Scenes, captured with a hand-held thermal-infrared camera, to facilitate future research on thermal scene reconstruction. Comprehensive experiments show that ThermalGaussian achieves photorealistic rendering of thermal images and improves the rendering quality of RGB images. With the proposed multimodal regularization constraints, we also reduce the model's storage cost by 90%. The code and dataset will be released.
Submitted 11 September, 2024; originally announced September 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.04801">arXiv:2409.04801</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.04801">pdf</a>, <a href="https://arxiv.org/format/2409.04801">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SpotActor: Training-Free Layout-Controlled Consistent Image Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jiahao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Caixia Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Weizhan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Haonan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+M">Mengmeng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+G">Guang Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+T">Tieliang Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+H">Hao Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jingdong Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.04801v1-abstract-short" style="display: inline;"> Text-to-image diffusion models significantly enhance the efficiency of artistic creation with high-fidelity image generation. However, in typical application scenarios like comic book production, they can neither place each subject into its expected spot nor maintain the consistent appearance of each subject across images. For these issues, we pioneer a novel task, Layout-to-Consistent-Image (L2CI&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.04801v1-abstract-full').style.display = 'inline'; document.getElementById('2409.04801v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.04801v1-abstract-full" style="display: none;"> Text-to-image diffusion models significantly enhance the efficiency of artistic creation with high-fidelity image generation. However, in typical application scenarios like comic book production, they can neither place each subject into its expected spot nor maintain the consistent appearance of each subject across images. For these issues, we pioneer a novel task, Layout-to-Consistent-Image (L2CI) generation, which produces consistent and compositional images in accordance with the given layout conditions and text prompts. To accomplish this challenging task, we present a new formalization of dual energy guidance with optimization in a dual semantic-latent space and thus propose a training-free pipeline, SpotActor, which features a layout-conditioned backward update stage and a consistent forward sampling stage. In the backward stage, we innovate a nuanced layout energy function to mimic the attention activations with a sigmoid-like objective. 
In the forward stage, we design Regional Interconnection Self-Attention (RISA) and Semantic Fusion Cross-Attention (SFCA) mechanisms that allow mutual interactions across images. To evaluate the performance, we present ActorBench, a dedicated benchmark with hundreds of reasonable prompt-box pairs derived from object detection datasets. Comprehensive experiments demonstrate the effectiveness of our method. The results show that SpotActor fulfills the expectations of this task and showcases its potential for practical applications, with superior layout alignment, subject consistency, prompt conformity, and background diversity.
Submitted 7 September, 2024; originally announced September 2024.

arXiv:2408.14393 [pdf, other] (https://arxiv.org/abs/2408.14393), cs.IR, cs.LG
CURE4Rec: A Benchmark for Recommendation Unlearning with Deeper Influence
Authors: Chaochao Chen, Jiaming Zhang, Yizhao Zhang, Li Zhang, Lingjuan Lyu, Yuyuan Li, Biao Gong, Chenggang Yan
Abstract: With increasing privacy concerns in artificial intelligence, regulations have mandated the right to be forgotten, granting individuals the right to withdraw their data from models. Machine unlearning has emerged as a potential solution to enable selective forgetting in models, particularly in recommender systems, where historical data contains sensitive user information.
Despite recent advances in recommendation unlearning, comprehensively evaluating unlearning methods remains challenging due to the absence of a unified evaluation framework and the overlooked deeper aspects of influence, e.g., fairness. To address these gaps, we propose CURE4Rec, the first comprehensive benchmark for recommendation unlearning evaluation. CURE4Rec covers four aspects, i.e., unlearning Completeness, recommendation Utility, unleaRning efficiency, and recommendation fairnEss, under three data selection strategies, i.e., core data, edge data, and random data. Specifically, we consider the deeper influence of unlearning on recommendation fairness and on robustness towards data with varying impact levels. We construct multiple datasets for CURE4Rec evaluation and conduct extensive experiments on existing recommendation unlearning methods. Our code is released at https://github.com/xiye7lai/CURE4Rec.
Submitted 22 December, 2024; v1 submitted 26 August, 2024; originally announced August 2024.
Comments: Accepted to NeurIPS 2024, Datasets and Benchmarks.
Website: https://oktton.github.io

arXiv:2408.14357 [pdf, other] (https://arxiv.org/abs/2408.14357), cs.SE
Exploring ChatGPT App Ecosystem: Distribution, Deployment and Security
Authors: Chuan Yan, Ruomai Ren, Mark Huasong Meng, Liuhuo Wan, Tian Yang Ooi, Guangdong Bai
Abstract: ChatGPT has enabled third-party developers to create plugins to expand ChatGPT's capabilities. These plugins are distributed through OpenAI's plugin store, making them easily accessible to users. With ChatGPT as the backbone, this app ecosystem has illustrated great business potential by offering users personalized services in a conversational manner. Nonetheless, many crucial aspects of app development, deployment, and security in this ecosystem have yet to be thoroughly studied in the research community, potentially hindering broader adoption by both developers and users. In this work, we conduct the first comprehensive study of the ChatGPT app ecosystem, aiming to illuminate its landscape for the research community. Our study examines the distribution and deployment models used in the integration of LLMs and third-party apps, and assesses their security and privacy implications. We uncover an uneven distribution of functionality among ChatGPT plugins, highlighting prevalent and emerging topics. We also identify severe flaws in the authentication and user data protection of third-party app APIs integrated within LLMs, revealing a concerning status quo of security and privacy in this app ecosystem. Our work provides insights for the secure and sustainable development of this rapidly evolving ecosystem.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.14357v1-abstract-full').style.display = 'none'; document.getElementById('2408.14357v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by the 39th IEEE/ACM International Conference on Automated Software Engineering (ASE 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.12879">arXiv:2408.12879</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.12879">pdf</a>, <a href="https://arxiv.org/format/2408.12879">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Frequency-aware Feature Fusion for Dense Image Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Linwei Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+Y">Ying Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+L">Lin Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+C">Chenggang Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Harada%2C+T">Tatsuya Harada</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+G">Gao Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.12879v1-abstract-short" style="display: inline;"> Dense image prediction tasks demand features with strong category information and precise spatial boundary details at high resolution. To achieve this, modern hierarchical models often utilize feature fusion, directly adding upsampled coarse features from deep layers and high-resolution features from lower levels. In this paper, we observe rapid variations in fused feature values within objects, r&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12879v1-abstract-full').style.display = 'inline'; document.getElementById('2408.12879v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.12879v1-abstract-full" style="display: none;"> Dense image prediction tasks demand features with strong category information and precise spatial boundary details at high resolution. To achieve this, modern hierarchical models often utilize feature fusion, directly adding upsampled coarse features from deep layers and high-resolution features from lower levels. In this paper, we observe rapid variations in fused feature values within objects, resulting in intra-category inconsistency due to disturbed high-frequency features. Additionally, blurred boundaries in fused features lack accurate high frequency, leading to boundary displacement. 
Building on these observations, we propose Frequency-Aware Feature Fusion (FreqFusion), which integrates an Adaptive Low-Pass Filter (ALPF) generator, an offset generator, and an Adaptive High-Pass Filter (AHPF) generator. The ALPF generator predicts spatially variant low-pass filters that attenuate high-frequency components within objects, reducing intra-class inconsistency during upsampling. The offset generator refines large inconsistent features and thin boundaries by replacing inconsistent features with more consistent ones through resampling, while the AHPF generator enhances the high-frequency detailed boundary information lost during downsampling. Comprehensive visualization and quantitative analysis demonstrate that FreqFusion effectively improves feature consistency and sharpens object boundaries. Extensive experiments across various dense prediction tasks confirm its effectiveness. The code is publicly available at https://github.com/Linwei-Chen/FreqFusion.
Submitted 23 August, 2024; originally announced August 2024.
Comments: Accepted by TPAMI (2024).
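The simplified PyTorch sketch below conveys the adaptive low-pass part of this design: predict a per-pixel smoothing kernel, normalize it so it can only smooth, and apply it to the coarse features before upsampling. The module name, kernel size, and nearest-neighbor upsampling are simplifying assumptions of ours, and the offset and high-pass branches are omitted; see the released code for the full method.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class AdaptiveLowPass(nn.Module):
    """Per-pixel softmax-normalized kernels smooth coarse features before upsampling."""
    def __init__(self, channels, k=3):
        super().__init__()
        self.k = k
        self.kernel_pred = nn.Conv2d(channels, k * k, kernel_size=3, padding=1)

    def forward(self, x):                                   # x: (B, C, H, W)
        b, c, h, w = x.shape
        kernels = F.softmax(self.kernel_pred(x), dim=1)     # (B, k*k, H, W), sums to 1
        patches = F.unfold(x, self.k, padding=self.k // 2)  # (B, C*k*k, H*W)
        patches = patches.view(b, c, self.k * self.k, h, w)
        smoothed = (patches * kernels.unsqueeze(1)).sum(2)  # adaptive low-pass filtering
        return F.interpolate(smoothed, scale_factor=2, mode="nearest")

coarse = torch.randn(1, 8, 16, 16)
print(AdaptiveLowPass(8)(coarse).shape)                     # torch.Size([1, 8, 32, 32])
```

Because each predicted kernel is non-negative and sums to one, the filtering can only smooth, which is what suppresses the within-object high-frequency variation that the paper associates with intra-category inconsistency.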

arXiv:2408.06646 [pdf, other] (https://arxiv.org/abs/2408.06646), cs.CV
Hybrid SD: Edge-Cloud Collaborative Inference for Stable Diffusion Models
Authors: Chenqian Yan, Songwei Liu, Hongjian Liu, Xurui Peng, Xiaojian Wang, Fangmin Chen, Lean Fu, Xing Mei
Abstract: Stable Diffusion Models (SDMs) have shown remarkable proficiency in image synthesis. However, their broad application is impeded by their large model sizes and intensive computational requirements, which typically require expensive cloud servers for deployment. On the flip side, while there are many compact models tailored for edge devices that can reduce these demands, they often compromise semantic integrity and visual quality compared with full-sized SDMs. To bridge this gap, we introduce Hybrid SD, an innovative, training-free SDM inference framework designed for edge-cloud collaborative inference. Hybrid SD assigns the early steps of the diffusion process to large models deployed on cloud servers to enhance semantic planning, while small, efficient models deployed on edge devices refine the visual details in the later stages. Acknowledging the diversity of edge devices with differing computational and storage capacities, we apply structural pruning to the SDM U-Net and train a lightweight VAE. Empirical evaluations demonstrate that our compressed models achieve state-of-the-art parameter efficiency (225.8M) on edge devices with competitive image quality. Additionally, Hybrid SD reduces the cloud cost by 66% with edge-cloud collaborative inference.
Submitted 29 October, 2024; v1 submitted 13 August, 2024; originally announced August 2024.
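Schematically, that step split amounts to a sampling loop that switches denoisers partway through; the stand-in denoisers, the 20-step budget, and the switch point below are illustrative assumptions, not the actual cloud and edge Stable Diffusion models.

```python
# Toy illustration of splitting denoising steps between a cloud model
# (early, semantic planning) and an edge model (late, detail refinement).

def cloud_denoiser(latent, t):          # stand-in for the large cloud SDM
    return [v * 0.9 for v in latent]

def edge_denoiser(latent, t):           # stand-in for the pruned on-device SDM
    return [v * 0.95 for v in latent]

def hybrid_sample(latent, num_steps=20, switch_step=8):
    for t in range(num_steps):
        step = cloud_denoiser if t < switch_step else edge_denoiser
        latent = step(latent, t)
    return latent

print(hybrid_sample([1.0, -0.5, 0.25]))
```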