
Search | arXiv e-print repository

Showing 1–50 of 6,830 results for author: Wang, Z
Searching in archive cs. Results sorted by announcement date (newest first); 50 results per page; abstracts shown.
arXiv:2411.14386 [pdf, other] cs.RO
Learning Humanoid Locomotion with Perceptive Internal Model
Authors: Junfeng Long, Junli Ren, Moji Shi, Zirui Wang, Tao Huang, Ping Luo, Jiangmiao Pang
Abstract: In contrast to quadruped robots that can navigate diverse terrains using a "blind" policy, humanoid robots require accurate perception for stable locomotion due to their high degrees of freedom and inherently unstable morphology. However, incorporating perceptual signals often introduces additional disturbances to the system, potentially reducing its robustness, generalizability, and efficiency. This paper presents the Perceptive Internal Model (PIM), which relies on onboard, continuously updated elevation maps centered around the robot to perceive its surroundings. We train the policy using ground-truth obstacle heights surrounding the robot in simulation, optimizing it based on the Hybrid Internal Model (HIM), and perform inference with heights sampled from the constructed elevation map. Unlike previous methods that directly encode depth maps or raw point clouds, our approach allows the robot to perceive the terrain beneath its feet clearly and is less affected by camera movement or noise. Furthermore, since depth map rendering is not required in simulation, our method introduces minimal additional computational costs and can train the policy in 3 hours on an RTX 4090 GPU. We verify the effectiveness of our method across various humanoid robots, various indoor and outdoor terrains, stairs, and various sensor configurations. Our method can enable a humanoid robot to continuously climb stairs and has the potential to serve as a foundational algorithm for the development of future humanoid control methods.
Submitted 21 November, 2024; originally announced November 2024.
Comments: submitted to ICRA2025

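The entry above describes training on ground-truth obstacle heights around the robot and, at deployment, sampling heights from an onboard elevation map. The sketch below illustrates only that sampling step; the grid size, spacing, nearest-neighbour lookup, and function names are illustrative assumptions, not the paper's implementation.

```python
# Minimal sketch (not the paper's code): sampling a local grid of terrain
# heights around a robot base from a world-frame elevation map, the kind of
# observation the PIM abstract describes.
import numpy as np

def sample_local_heights(elevation_map, map_origin, resolution,
                         base_xy, base_yaw, grid=(11, 11), spacing=0.1):
    """Return a (grid_h * grid_w,) vector of heights around the robot base.

    elevation_map : (H, W) world-frame height grid
    map_origin    : (x, y) world coordinates of elevation_map[0, 0]
    resolution    : metres per cell of the elevation map
    base_xy       : robot base position in the world frame
    base_yaw      : robot heading, radians
    """
    gh, gw = grid
    # Sample points laid out on a regular grid in the robot (base) frame.
    xs = (np.arange(gw) - (gw - 1) / 2) * spacing
    ys = (np.arange(gh) - (gh - 1) / 2) * spacing
    gx, gy = np.meshgrid(xs, ys)
    pts = np.stack([gx.ravel(), gy.ravel()], axis=-1)            # (N, 2)

    # Rotate into the world frame and translate to the base position.
    c, s = np.cos(base_yaw), np.sin(base_yaw)
    rot = np.array([[c, -s], [s, c]])
    world_pts = pts @ rot.T + np.asarray(base_xy)

    # Nearest-neighbour lookup in the elevation map (clamped at the borders);
    # here columns index x and rows index y, an assumed map convention.
    idx = np.round((world_pts - np.asarray(map_origin)) / resolution).astype(int)
    idx[:, 0] = np.clip(idx[:, 0], 0, elevation_map.shape[1] - 1)
    idx[:, 1] = np.clip(idx[:, 1], 0, elevation_map.shape[0] - 1)
    heights = elevation_map[idx[:, 1], idx[:, 0]]

    # Express heights relative to their mean so the policy sees terrain shape only.
    return heights - heights.mean()
```
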
arXiv:2411.14205 [pdf, other] cs.CV cs.AI
Is this Generated Person Existed in Real-world? Fine-grained Detecting and Calibrating Abnormal Human-body
Authors: Zeqing Wang, Qingyang Ma, Wentao Wan, Haojie Li, Keze Wang, Yonghong Tian
Abstract: Recent improvements in visual synthesis have significantly enhanced the depiction of generated human photos, which are pivotal due to their wide applicability and demand. Nonetheless, the existing text-to-image or text-to-video models often generate low-quality human photos that might differ considerably from real-world body structures, referred to as "abnormal human bodies". Such abnormalities, typically deemed unacceptable, pose considerable challenges in the detection and repair of them within human photos. These challenges require precise abnormality recognition capabilities, which entail pinpointing both the location and the abnormality type. Intuitively, Visual Language Models (VLMs) that have obtained remarkable performance on various visual tasks are quite suitable for this task. However, their performance on abnormality detection in human photos is quite poor. Hence, it is quite important to highlight this task for the research community. In this paper, we first introduce a simple yet challenging task, i.e., Fine-grained Human-body Abnormality Detection (FHAD), and construct two high-quality datasets for evaluation. Then, we propose a meticulous framework, named HumanCalibrator, which identifies and repairs abnormalities in human body structures while preserving the other content. Experiments indicate that our HumanCalibrator achieves high accuracy in abnormality detection and accomplishes an increase in visual comparisons while preserving the other visual content.
Submitted 21 November, 2024; originally announced November 2024.
Comments: 16 pages, 14 figures

arXiv:2411.13949 [pdf, other] cs.CV cs.AI
Separable Mixture of Low-Rank Adaptation for Continual Visual Instruction Tuning
Authors: Ziqi Wang, Chang Che, Qi Wang, Yangyang Li, Zenglin Shi, Meng Wang
Abstract: Visual instruction tuning (VIT) enables multimodal large language models (MLLMs) to effectively handle a wide range of vision tasks by framing them as language-based instructions. Building on this, continual visual instruction tuning (CVIT) extends the capability of MLLMs to incrementally learn new tasks, accommodating evolving functionalities. While prior work has advanced CVIT through the development of new benchmarks and approaches to mitigate catastrophic forgetting, these efforts largely follow traditional continual learning paradigms, neglecting the unique challenges specific to CVIT. We identify a dual form of catastrophic forgetting in CVIT, where MLLMs not only forget previously learned visual understanding but also experience a decline in instruction following abilities as they acquire new tasks. To address this, we introduce the Separable Mixture of Low-Rank Adaptation (SMoLoRA) framework, which employs separable routing through two distinct modules - one for visual understanding and another for instruction following. This dual-routing design enables specialized adaptation in both domains, preventing forgetting while improving performance. Furthermore, we propose a novel CVIT benchmark that goes beyond existing benchmarks by additionally evaluating a model's ability to generalize to unseen tasks and handle diverse instructions across various tasks. Extensive experiments demonstrate that SMoLoRA outperforms existing methods in mitigating dual forgetting, improving generalization to unseen tasks, and ensuring robustness in following diverse instructions.
Submitted 21 November, 2024; originally announced November 2024.

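The SMoLoRA entry above describes separable routing through two low-rank adapters, one for visual understanding and one for instruction following. Below is a minimal PyTorch sketch of such a dual-routed LoRA layer; the module structure, router, and hyperparameters are assumptions for illustration, not the authors' released code.

```python
# Minimal sketch (assumptions, not the SMoLoRA release): a frozen linear layer
# augmented with two LoRA branches -- one intended for visual understanding,
# one for instruction following -- combined by an input-dependent router.
import torch
import torch.nn as nn

class DualRoutedLoRALinear(nn.Module):
    def __init__(self, in_dim, out_dim, rank=8):
        super().__init__()
        self.base = nn.Linear(in_dim, out_dim)
        self.base.weight.requires_grad_(False)   # frozen base projection (pretrained in practice)
        self.base.bias.requires_grad_(False)
        # Two separable low-rank adapters.
        self.visual_down = nn.Linear(in_dim, rank, bias=False)
        self.visual_up = nn.Linear(rank, out_dim, bias=False)
        self.instr_down = nn.Linear(in_dim, rank, bias=False)
        self.instr_up = nn.Linear(rank, out_dim, bias=False)
        # Router produces a weight for each branch from the token features.
        self.router = nn.Linear(in_dim, 2)

    def forward(self, x):
        gates = torch.softmax(self.router(x), dim=-1)             # (..., 2)
        visual = self.visual_up(self.visual_down(x))
        instr = self.instr_up(self.instr_down(x))
        return (self.base(x)
                + gates[..., :1] * visual
                + gates[..., 1:] * instr)

# Usage: swap selected projection layers of an MLLM for this module and train
# only the adapter/router parameters on each new task.
layer = DualRoutedLoRALinear(768, 768)
out = layer(torch.randn(4, 16, 768))
```
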
arXiv:2411.13908 [pdf, other] cs.RO
Hybrid Physics-ML Modeling for Marine Vehicle Maneuvering Motions in the Presence of Environmental Disturbances
Authors: Zihao Wang, Jian Cheng, Liang Xu, Lizhu Hao, Yan Peng
Abstract: A hybrid physics-machine learning modeling framework is proposed for the surface vehicles' maneuvering motions to address the modeling capability and stability in the presence of environmental disturbances. From a deep learning perspective, the framework is based on a variant version of residual networks with additional feature extraction. Initially, an imperfect physical model is derived and identified to capture the fundamental hydrodynamic characteristics of marine vehicles. This model is then integrated with a feedforward network through a residual block. Additionally, feature extraction from trigonometric transformations is employed in the machine learning component to account for the periodic influence of currents and waves. The proposed method is evaluated using real navigational data from the 'JH7500' unmanned surface vehicle. The results demonstrate the robust generalizability and accurate long-term prediction capabilities of the nonlinear dynamic model in specific environmental conditions. This approach has the potential to be extended and applied to develop a comprehensive high-fidelity simulator.
Submitted 21 November, 2024; originally announced November 2024.

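The hybrid physics-ML entry above combines an imperfect physical model with a feedforward network through a residual block, plus trigonometric feature extraction for periodic disturbances. A minimal sketch of that structure follows; the placeholder physics_step, feature layout, and network sizes are assumptions, not the paper's identified model.

```python
# Minimal sketch (illustrative assumptions, not the paper's model): a residual
# hybrid in which an imperfect physical model predicts the nominal state change
# and a small feedforward network learns the residual, with trigonometric
# features standing in for periodic current/wave effects.
import torch
import torch.nn as nn

def physics_step(state, control):
    # Placeholder first-principles model: linear damping of the state plus a
    # crude control effect; a real implementation would use an identified
    # hydrodynamic maneuvering model.
    gain = torch.eye(control.shape[-1], state.shape[-1])
    return -0.05 * state + 0.1 * control @ gain

class HybridResidualModel(nn.Module):
    def __init__(self, state_dim, control_dim, hidden=64):
        super().__init__()
        # Trigonometric features encode heading/phase-like periodic inputs.
        feat_dim = state_dim + control_dim + 2 * state_dim
        self.net = nn.Sequential(
            nn.Linear(feat_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, state_dim),
        )

    def forward(self, state, control):
        nominal = physics_step(state, control)
        trig = torch.cat([torch.sin(state), torch.cos(state)], dim=-1)
        residual = self.net(torch.cat([state, control, trig], dim=-1))
        return nominal + residual   # residual block: physics prediction + learned correction

model = HybridResidualModel(state_dim=6, control_dim=2)
pred = model(torch.randn(32, 6), torch.randn(32, 2))   # predicted state derivative
```
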
arXiv:2411.13740 [pdf, other] cs.LG cs.AI cs.DC cs.NI
Federated Continual Learning for Edge-AI: A Comprehensive Survey
Authors: Zi Wang, Fei Wu, Feng Yu, Yurui Zhou, Jia Hu, Geyong Min
Abstract: Edge-AI, the convergence of edge computing and artificial intelligence (AI), has become a promising paradigm that enables the deployment of advanced AI models at the network edge, close to users. In Edge-AI, federated continual learning (FCL) has emerged as an imperative framework, which fuses knowledge from different clients while preserving data privacy and retaining knowledge from previous tasks as it learns new ones. By so doing, FCL aims to ensure stable and reliable performance of learning models in dynamic and distributed environments. In this survey, we thoroughly review the state-of-the-art research and present the first comprehensive survey of FCL for Edge-AI. We categorize FCL methods based on three task characteristics: federated class continual learning, federated domain continual learning, and federated task continual learning. For each category, an in-depth investigation and review of the representative methods are provided, covering background, challenges, problem formalisation, solutions, and limitations. Besides, existing real-world applications empowered by FCL are reviewed, indicating the current progress and potential of FCL in diverse application domains. Furthermore, we discuss and highlight several prospective research directions of FCL such as algorithm-hardware co-design for FCL and FCL with foundation models, which could provide insights into the future development and practical deployment of FCL in the era of Edge-AI.
Submitted 20 November, 2024; originally announced November 2024.

arXiv:2411.13620 [pdf, other] cs.CV
Robust SG-NeRF: Robust Scene Graph Aided Neural Surface Reconstruction
Authors: Yi Gu, Dongjun Ye, Zhaorui Wang, Jiaxu Wang, Jiahang Cao, Renjing Xu
Abstract: Neural surface reconstruction relies heavily on accurate camera poses as input. Despite utilizing advanced pose estimators like COLMAP or ARKit, camera poses can still be noisy. Existing pose-NeRF joint optimization methods handle poses with small noise (inliers) effectively but struggle with large noise (outliers), such as mirrored poses. In this work, we focus on mitigating the impact of outlier poses. Our method integrates an inlier-outlier confidence estimation scheme, leveraging scene graph information gathered during the data preparation phase. Unlike previous works directly using rendering metrics as the reference, we employ a detached color network that omits the viewing direction as input to minimize the impact caused by shape-radiance ambiguities. This enhanced confidence updating strategy effectively differentiates between inlier and outlier poses, allowing us to sample more rays from inlier poses to construct more reliable radiance fields. Additionally, we introduce a re-projection loss based on the current Signed Distance Function (SDF) and pose estimations, strengthening the constraints between matching image pairs. For outlier poses, we adopt a Monte Carlo re-localization method to find better solutions. We also devise a scene graph updating strategy to provide more accurate information throughout the training process. We validate our approach on the SG-NeRF and DTU datasets. Experimental results on various datasets demonstrate that our methods can consistently improve the reconstruction qualities and pose accuracies.
Submitted 20 November, 2024; originally announced November 2024.
Comments: https://rsg-nerf.github.io/RSG-NeRF/

arXiv:2411.13611 [pdf, other] cs.SE cs.AI
DSTC: Direct Preference Learning with Only Self-Generated Tests and Code to Improve Code LMs
Authors: Zhihan Liu, Shenao Zhang, Yongfei Liu, Boyi Liu, Yingxiang Yang, Zhaoran Wang
Abstract: Direct preference learning offers a promising and computation-efficient alternative to supervised fine-tuning (SFT) for improving code generation in coding large language models (LMs). However, the scarcity of reliable preference data is a bottleneck for the performance of direct preference learning to improve the coding accuracy of code LMs. In this paper, we introduce Direct Preference Learning with Only Self-Generated Tests and Code (DSTC), a framework that leverages only self-generated code snippets and tests to construct reliable preference pairs such that direct preference learning can improve LM coding accuracy without external annotations. DSTC combines a minimax selection process and test-code concatenation to improve preference pair quality, reducing the influence of incorrect self-generated tests and enhancing model performance without the need for costly reward models. When applied with direct preference learning methods such as Direct Preference Optimization (DPO) and Kahneman-Tversky Optimization (KTO), DSTC yields stable improvements in coding accuracy (pass@1 score) across diverse coding benchmarks, including HumanEval, MBPP, and BigCodeBench, demonstrating both its effectiveness and scalability for models of various sizes. This approach autonomously enhances code generation accuracy across LLMs of varying sizes, reducing reliance on expensive annotated coding datasets.
Submitted 19 November, 2024; originally announced November 2024.

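The DSTC entry above builds preference pairs from self-generated code and tests via a minimax selection process and test-code concatenation. The sketch below shows one plausible reading of that pipeline; the pass-count scoring and selection rule are assumptions and may differ from the paper's exact procedure.

```python
# Minimal sketch of building preference pairs from self-generated code and
# tests, in the spirit of the DSTC abstract. The scoring and selection rules
# (pass-count ranking over a code x test matrix) are illustrative assumptions.

def passes(code: str, test: str) -> bool:
    """Run one self-generated test against one candidate solution."""
    env: dict = {}
    try:
        exec(code, env)   # define the candidate solution
        exec(test, env)   # test raises AssertionError on failure
        return True
    except Exception:
        return False

def build_preference_pair(prompt, codes, tests):
    """Return a (chosen, rejected) pair for direct preference learning.

    Each completion concatenates a candidate solution with the tests it was
    judged against, so the pair also reflects test-consistent behaviour.
    """
    scores = [sum(passes(c, t) for t in tests) for c in codes]
    best = max(range(len(codes)), key=lambda i: scores[i])
    worst = min(range(len(codes)), key=lambda i: scores[i])
    if scores[best] == scores[worst]:
        return None   # no preference signal: skip this prompt
    chosen = codes[best] + "\n\n" + "\n".join(tests)
    rejected = codes[worst] + "\n\n" + "\n".join(tests)
    return {"prompt": prompt, "chosen": chosen, "rejected": rejected}
```
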
arXiv:2411.13609 [pdf, other] cs.CV
What You See Is What Matters: A Novel Visual and Physics-Based Metric for Evaluating Video Generation Quality
Authors: Zihan Wang, Songlin Li, Lingyan Hao, Bowen Song, Xinyu Hu
Abstract: As video generation models advance rapidly, assessing the quality of generated videos has become increasingly critical. Existing metrics, such as Fréchet Video Distance (FVD), Inception Score (IS), and ClipSim, measure quality primarily in latent space rather than from a human visual perspective, often overlooking key aspects like appearance and motion consistency to physical laws. In this paper, we propose a novel metric, VAMP (Visual Appearance and Motion Plausibility), that evaluates both the visual appearance and physical plausibility of generated videos. VAMP is composed of two main components: an appearance score, which assesses color, shape, and texture consistency across frames, and a motion score, which evaluates the realism of object movements. We validate VAMP through two experiments: corrupted video evaluation and generated video evaluation. In the corrupted video evaluation, we introduce various types of corruptions into real videos and measure the correlation between corruption severity and VAMP scores. In the generated video evaluation, we use state-of-the-art models to generate videos from carefully designed prompts and compare VAMP's performance to human evaluators' rankings. Our results demonstrate that VAMP effectively captures both visual fidelity and temporal consistency, offering a more comprehensive evaluation of video quality than traditional methods.
Submitted 19 November, 2024; originally announced November 2024.

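The VAMP entry above combines an appearance score (color, shape, and texture consistency across frames) with a motion score. The toy sketch below only mirrors that two-part structure; the histogram-intersection appearance measure and the equal weighting are placeholder assumptions, not the actual metric.

```python
# Toy stand-in (not the VAMP implementation) for the two-part structure the
# abstract describes: an appearance-consistency score over adjacent frames
# combined with a motion-plausibility score.
import numpy as np

def appearance_score(frames):
    """frames: (T, H, W, 3) uint8 video with T >= 2. Higher = more consistent."""
    sims = []
    for a, b in zip(frames[:-1], frames[1:]):
        ha, _ = np.histogram(a, bins=32, range=(0, 256))
        hb, _ = np.histogram(b, bins=32, range=(0, 256))
        ha = ha / ha.sum()
        hb = hb / hb.sum()
        sims.append(float(np.minimum(ha, hb).sum()))   # histogram intersection in [0, 1]
    return float(np.mean(sims))

def vamp_like_score(frames, motion_score, w_appearance=0.5):
    """Combine appearance and motion components into a single video score."""
    return w_appearance * appearance_score(frames) + (1 - w_appearance) * motion_score
```
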
arXiv:2411.13577 [pdf, other] eess.AS cs.CL cs.LG cs.MM cs.SD
WavChat: A Survey of Spoken Dialogue Models
Authors: Shengpeng Ji, Yifu Chen, Minghui Fang, Jialong Zuo, Jingyu Lu, Hanting Wang, Ziyue Jiang, Long Zhou, Shujie Liu, Xize Cheng, Xiaoda Yang, Zehan Wang, Qian Yang, Jian Li, Yidi Jiang, Jingzhen He, Yunfei Chu, Jin Xu, Zhou Zhao
Abstract: Recent advancements in spoken dialogue models, exemplified by systems like GPT-4o, have captured significant attention in the speech domain. Compared to traditional three-tier cascaded spoken dialogue models that comprise speech recognition (ASR), large language models (LLMs), and text-to-speech (TTS), modern spoken dialogue models exhibit greater intelligence. These advanced spoken dialogue models not only comprehend audio, music, and other speech-related features, but also capture stylistic and timbral characteristics in speech. Moreover, they generate high-quality, multi-turn speech responses with low latency, enabling real-time interaction through simultaneous listening and speaking capability. Despite the progress in spoken dialogue systems, there is a lack of comprehensive surveys that systematically organize and analyze these systems and the underlying technologies. To address this, we have first compiled existing spoken dialogue systems in chronological order and categorized them into the cascaded and end-to-end paradigms. We then provide an in-depth overview of the core technologies in spoken dialogue models, covering aspects such as speech representation, training paradigm, streaming, duplex, and interaction capabilities. Each section discusses the limitations of these technologies and outlines considerations for future research. Additionally, we present a thorough review of relevant datasets, evaluation metrics, and benchmarks from the perspectives of training and evaluating spoken dialogue systems. We hope this survey will contribute to advancing both academic research and industrial applications in the field of spoken dialogue systems. The related material is available at https://github.com/jishengpeng/WavChat.
Submitted 14 November, 2024; originally announced November 2024.
Comments: 60 pages, work in progress

arXiv:2411.13425 [pdf, other] cs.CR cs.CL cs.LG
WaterPark: A Robustness Assessment of Language Model Watermarking
Authors: Jiacheng Liang, Zian Wang, Lauren Hong, Shouling Ji, Ting Wang
Abstract: To mitigate the misuse of large language models (LLMs), such as disinformation, automated phishing, and academic cheating, there is a pressing need for the capability of identifying LLM-generated texts. Watermarking emerges as one promising solution: it plants statistical signals into LLMs' generative processes and subsequently verifies whether LLMs produce given texts. Various watermarking methods ("watermarkers") have been proposed; yet, due to the lack of unified evaluation platforms, many critical questions remain under-explored: i) What are the strengths/limitations of various watermarkers, especially their attack robustness? ii) How do various design choices impact their robustness? iii) How to optimally operate watermarkers in adversarial environments? To fill this gap, we systematize existing LLM watermarkers and watermark removal attacks, mapping out their design spaces. We then develop WaterPark, a unified platform that integrates 10 state-of-the-art watermarkers and 12 representative attacks. More importantly, leveraging WaterPark, we conduct a comprehensive assessment of existing watermarkers, unveiling the impact of various design choices on their attack robustness. For instance, a watermarker's resilience to increasingly intensive attacks hinges on its context dependency. We further explore the best practices to operate watermarkers in adversarial environments. For instance, using a generic detector alongside a watermark-specific detector improves the security of vulnerable watermarkers. We believe our study sheds light on current LLM watermarking techniques while WaterPark serves as a valuable testbed to facilitate future research.
Submitted 20 November, 2024; originally announced November 2024.
Comments: 22 pages

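The WaterPark entry above describes a unified platform that crosses watermarkers with removal attacks to measure robustness. The sketch below shows the general shape of such a sweep; the generate/detect interface and the detection-rate metric are assumptions, not the WaterPark API.

```python
# Minimal sketch of a watermarker x attack robustness sweep in the spirit of
# the WaterPark abstract (interfaces and metric are assumptions).
from typing import Callable, Dict, List

def robustness_sweep(watermarkers: Dict[str, dict],
                     attacks: Dict[str, Callable[[str], str]],
                     prompts: List[str]) -> Dict[tuple, float]:
    """Return the detection rate of each watermarker under each attack."""
    results = {}
    for wm_name, wm in watermarkers.items():
        for atk_name, attack in attacks.items():
            detected = 0
            for prompt in prompts:
                text = wm["generate"](prompt)    # watermarked generation
                attacked = attack(text)          # e.g. paraphrase, edit, truncate
                detected += bool(wm["detect"](attacked))
            results[(wm_name, atk_name)] = detected / len(prompts)
    return results

# Usage with trivial stand-ins:
watermarkers = {"toy": {"generate": lambda p: p + " [wm]",
                        "detect": lambda t: "[wm]" in t}}
attacks = {"truncate": lambda t: t[: len(t) // 2]}
print(robustness_sweep(watermarkers, attacks, ["hello world", "another prompt"]))
```
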
However, there is s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13322v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13322v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13322v1-abstract-full" style="display: none;"> The scaling law is a notable property of neural network models and has significantly propelled the development of large language models. Scaling laws hold great promise in guiding model design and resource allocation. Recent research increasingly shows that scaling laws are not limited to NLP tasks or Transformer architectures; they also apply to domains such as recommendation. However, there is still a lack of literature on scaling law research in online advertisement retrieval systems. This may be because 1) identifying the scaling law for resource cost and online revenue is often expensive in both time and training resources for large-scale industrial applications, and 2) varying settings for different systems prevent the scaling law from being applied across various scenarios. To address these issues, we propose a lightweight paradigm to identify the scaling law of online revenue and machine cost for a certain online advertisement retrieval scenario with a low experimental cost. Specifically, we focus on a sole factor (FLOPs) and propose an offline metric named R/R* that exhibits a high linear correlation with online revenue for retrieval models. We estimate the machine cost offline via a simulation algorithm. Thus, we can transform most online experiments into low-cost offline experiments. We conduct comprehensive experiments to verify the effectiveness of our proposed metric R/R* and to identify the scaling law in the online advertisement retrieval system of Kuaishou. With the scaling law, we demonstrate practical applications for ROI-constrained model designing and multi-scenario resource allocation in Kuaishou advertising system. To the best of our knowledge, this is the first work to study the scaling laws for online advertisement retrieval of real-world systems, showing great potential for scaling law in advertising system optimization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13322v1-abstract-full').style.display = 'none'; document.getElementById('2411.13322v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
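<!-- A minimal sketch of the kind of scaling-law fit described in the entry above (arXiv:2411.13322): relate compute (FLOPs) to an offline metric such as R/R* by fitting a power law in log-log space. The data points, variable names, and use of NumPy are illustrative assumptions, not the authors' implementation.

import numpy as np

# Hypothetical (FLOPs, offline R/R*-style metric) pairs for several model sizes.
# These numbers are made up for illustration only.
flops = np.array([1e15, 4e15, 1.6e16, 6.4e16, 2.56e17])
metric = np.array([0.62, 0.68, 0.73, 0.77, 0.80])

# Fit metric ~ a * flops**b, i.e. log(metric) = log(a) + b * log(flops).
b, log_a = np.polyfit(np.log(flops), np.log(metric), deg=1)
a = np.exp(log_a)
print(f"fitted scaling law: metric ~ {a:.3g} * FLOPs^{b:.3f}")

# Extrapolate to a larger compute budget, e.g. for ROI-constrained model design.
print("predicted metric at 1e18 FLOPs:", a * (1e18) ** b)

-->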
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13244">arXiv:2411.13244</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13244">pdf</a>, <a href="https://arxiv.org/format/2411.13244">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Leveraging Prior Experience: An Expandable Auxiliary Knowledge Base for Text-to-SQL </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chu%2C+Z">Zhibo Chu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zichong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Qin%2C+Q">Qitao Qin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13244v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) exhibit impressive problem-solving skills across many tasks, but they still underperform compared to humans in various downstream applications, such as text-to-SQL. On the BIRD benchmark leaderboard, human performance achieves an accuracy of 92.96\%, whereas the top-performing method reaches only 72.39\%. Notably, these state-of-the-art (SoTA) methods predominantly rel&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13244v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13244v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13244v1-abstract-full" style="display: none;"> Large Language Models (LLMs) exhibit impressive problem-solving skills across many tasks, but they still underperform compared to humans in various downstream applications, such as text-to-SQL. On the BIRD benchmark leaderboard, human performance achieves an accuracy of 92.96\%, whereas the top-performing method reaches only 72.39\%. Notably, these state-of-the-art (SoTA) methods predominantly rely on in-context learning to simulate human-like reasoning. However, they overlook a critical human skill: continual learning. Inspired by the educational practice of maintaining mistake notebooks during our formative years, we propose LPE-SQL (Leveraging Prior Experience: An Expandable Auxiliary Knowledge Base for Text-to-SQL), a novel framework designed to augment LLMs by enabling continual learning without requiring parameter fine-tuning. LPE-SQL consists of four modules that \textbf{i)} retrieve relevant entries, \textbf{ii)} efficient sql generation, \textbf{iii)} generate the final result through a cross-consistency mechanism and \textbf{iv)} log successful and failed tasks along with their reasoning processes or reflection-generated tips. Importantly, the core module of LPE-SQL is the fourth one, while the other modules employ foundational methods, allowing LPE-SQL to be easily integrated with SoTA technologies to further enhance performance. 
Our experimental results demonstrate that this continual learning approach yields substantial performance gains, with the smaller Llama-3.1-70B model surpassing the performance of the larger Llama-3.1-405B model using SoTA methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13244v1-abstract-full').style.display = 'none'; document.getElementById('2411.13244v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13243">arXiv:2411.13243</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13243">pdf</a>, <a href="https://arxiv.org/format/2411.13243">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> XMask3D: Cross-modal Mask Reasoning for Open Vocabulary 3D Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Ziyi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yanbo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+X">Xumin Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+J">Jie Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jiwen Lu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13243v1-abstract-short" style="display: inline;"> Existing methodologies in open vocabulary 3D semantic segmentation primarily concentrate on establishing a unified feature space encompassing 3D, 2D, and textual modalities. Nevertheless, traditional techniques such as global feature alignment or vision-language model distillation tend to impose only approximate correspondence, struggling notably with delineating fine-grained segmentation boundari&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13243v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13243v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13243v1-abstract-full" style="display: none;"> Existing methodologies in open vocabulary 3D semantic segmentation primarily concentrate on establishing a unified feature space encompassing 3D, 2D, and textual modalities. Nevertheless, traditional techniques such as global feature alignment or vision-language model distillation tend to impose only approximate correspondence, struggling notably with delineating fine-grained segmentation boundaries. To address this gap, we propose a more meticulous mask-level alignment between 3D features and the 2D-text embedding space through a cross-modal mask reasoning framework, XMask3D. 
In our approach, we developed a mask generator based on the denoising UNet from a pre-trained diffusion model, leveraging its capability for precise textual control over dense pixel representations and enhancing the open-world adaptability of the generated masks. We further integrate 3D global features as implicit conditions into the pre-trained 2D denoising UNet, enabling the generation of segmentation masks with additional 3D geometry awareness. Subsequently, the generated 2D masks are employed to align mask-level 3D representations with the vision-language feature space, thereby augmenting the open vocabulary capability of 3D geometry embeddings. Finally, we fuse complementary 2D and 3D mask features, resulting in competitive performance across multiple benchmarks for 3D open vocabulary semantic segmentation. Code is available at https://github.com/wangzy22/XMask3D. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13243v1-abstract-full').style.display = 'none'; document.getElementById('2411.13243v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to NeurIPS 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13121">arXiv:2411.13121</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13121">pdf</a>, <a href="https://arxiv.org/format/2411.13121">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> ReinFog: A DRL Empowered Framework for Resource Management in Edge and Cloud Computing Environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhiyu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Goudarzi%2C+M">Mohammad Goudarzi</a>, <a href="/search/cs?searchtype=author&amp;query=Buyya%2C+R">Rajkumar Buyya</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13121v1-abstract-short" style="display: inline;"> The growing IoT landscape requires effective server deployment strategies to meet demands including real-time processing and energy efficiency. This is complicated by heterogeneous, dynamic applications and servers. 
To address these challenges, we propose ReinFog, a modular distributed software empowered with Deep Reinforcement Learning (DRL) for adaptive resource management across edge/fog and cl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13121v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13121v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13121v1-abstract-full" style="display: none;"> The growing IoT landscape requires effective server deployment strategies to meet demands including real-time processing and energy efficiency. This is complicated by heterogeneous, dynamic applications and servers. To address these challenges, we propose ReinFog, a modular distributed software empowered with Deep Reinforcement Learning (DRL) for adaptive resource management across edge/fog and cloud environments. ReinFog enables the practical development/deployment of various centralized and distributed DRL techniques for resource management in edge/fog and cloud computing environments. It also supports integrating native and library-based DRL techniques for diverse IoT application scheduling objectives. Additionally, ReinFog allows for customizing deployment configurations for different DRL techniques, including the number and placement of DRL Learners and DRL Workers in large-scale distributed systems. Besides, we propose a novel Memetic Algorithm for DRL Component (e.g., DRL Learners and DRL Workers) Placement in ReinFog named MADCP, which combines the strengths of Genetic Algorithm, Firefly Algorithm, and Particle Swarm Optimization. Experiments reveal that the DRL mechanisms developed within ReinFog have significantly enhanced both centralized and distributed DRL techniques implementation. These advancements have resulted in notable improvements in IoT application performance, reducing response time by 45%, energy consumption by 39%, and weighted cost by 37%, while maintaining minimal scheduling overhead. Additionally, ReinFog exhibits remarkable scalability, with a rise in DRL Workers from 1 to 30 causing only a 0.3-second increase in startup time and around 2 MB more RAM per Worker. The proposed MADCP for DRL component placement further accelerates the convergence rate of DRL techniques by up to 38%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13121v1-abstract-full').style.display = 'none'; document.getElementById('2411.13121v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12960">arXiv:2411.12960</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12960">pdf</a>, <a href="https://arxiv.org/format/2411.12960">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> I Can Tell What I am Doing: Toward Real-World Natural Language Grounding of Robot Experiences </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zihan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+B">Brian Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Dhat%2C+V">Varad Dhat</a>, <a href="/search/cs?searchtype=author&amp;query=Brumbaugh%2C+Z">Zander Brumbaugh</a>, <a href="/search/cs?searchtype=author&amp;query=Walker%2C+N">Nick Walker</a>, <a href="/search/cs?searchtype=author&amp;query=Krishna%2C+R">Ranjay Krishna</a>, <a href="/search/cs?searchtype=author&amp;query=Cakmak%2C+M">Maya Cakmak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12960v1-abstract-short" style="display: inline;"> Understanding robot behaviors and experiences through natural language is crucial for developing intelligent and transparent robotic systems. Recent advancement in large language models (LLMs) makes it possible to translate complex, multi-modal robotic experiences into coherent, human-readable narratives. However, grounding real-world robot experiences into natural language is challenging due to m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12960v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12960v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12960v1-abstract-full" style="display: none;"> Understanding robot behaviors and experiences through natural language is crucial for developing intelligent and transparent robotic systems. Recent advancement in large language models (LLMs) makes it possible to translate complex, multi-modal robotic experiences into coherent, human-readable narratives. However, grounding real-world robot experiences into natural language is challenging due to many reasons, such as multi-modal nature of data, differing sample rates, and data volume. We introduce RONAR, an LLM-based system that generates natural language narrations from robot experiences, aiding in behavior announcement, failure analysis, and human interaction to recover failure. Evaluated across various scenarios, RONAR outperforms state-of-the-art methods and improves failure recovery efficiency. Our contributions include a multi-modal framework for robot experience narration, a comprehensive real-robot dataset, and empirical evidence of RONAR&#39;s effectiveness in enhancing user experience in system transparency and failure analysis. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12960v1-abstract-full').style.display = 'none'; document.getElementById('2411.12960v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12882">arXiv:2411.12882</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12882">pdf</a>, <a href="https://arxiv.org/format/2411.12882">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> ProSec: Fortifying Code LLMs with Proactive Security Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+X">Xiangzhe Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Su%2C+Z">Zian Su</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+J">Jinyao Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+K">Kaiyuan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhenting Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xiangyu Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12882v1-abstract-short" style="display: inline;"> Recent advances in code-specific large language models (LLMs) have greatly enhanced code generation and refinement capabilities. However, the safety of code LLMs remains under-explored, posing potential risks as insecure code generated by these models may introduce vulnerabilities into real-world systems. Previous work proposes to collect security-focused instruction-tuning dataset from real-world&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12882v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12882v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12882v1-abstract-full" style="display: none;"> Recent advances in code-specific large language models (LLMs) have greatly enhanced code generation and refinement capabilities. However, the safety of code LLMs remains under-explored, posing potential risks as insecure code generated by these models may introduce vulnerabilities into real-world systems. Previous work proposes to collect security-focused instruction-tuning dataset from real-world vulnerabilities. It is constrained by the data sparsity of vulnerable code, and has limited applicability in the iterative post-training workflows of modern LLMs. In this paper, we propose ProSec, a novel proactive security alignment approach designed to align code LLMs with secure coding practices. 
ProSec systematically exposes the vulnerabilities in a code LLM by synthesizing error-inducing coding scenarios from Common Weakness Enumerations (CWEs), and generates fixes to vulnerable code snippets, allowing the model to learn secure practices through advanced preference learning objectives. The scenarios synthesized by ProSec trigger 25 times more vulnerable code than a normal instruction-tuning dataset, resulting in a security-focused alignment dataset 7 times larger than the previous work. Experiments show that models trained with ProSec are 29.2% to 35.5% more secure compared to previous work, with a marginal negative effect of less than 2 percentage points on the model&#39;s utility. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12882v1-abstract-full').style.display = 'none'; document.getElementById('2411.12882v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The first two authors contributed equally to this work</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12547">arXiv:2411.12547</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12547">pdf</a>, <a href="https://arxiv.org/format/2411.12547">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> S3TU-Net: Structured Convolution and Superpixel Transformer for Lung Nodule Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yuke Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xiang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Y">Yunyu Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xinyi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhenglei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">YuQing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S+H">Shuo Hong Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12547v1-abstract-short" style="display: inline;"> The irregular and challenging characteristics of lung adenocarcinoma nodules in computed tomography (CT) images complicate staging diagnosis, making accurate segmentation critical for clinicians to extract detailed lesion information. In this study, we propose a segmentation model, S3TU-Net, which integrates multi-dimensional spatial connectors and a superpixel-based visual transformer. 
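<!-- A minimal sketch connected to the ProSec entry above (arXiv:2411.12882): pair an error-inducing coding instruction with a vulnerable completion and its fix, in the (prompt, chosen, rejected) layout used by common preference-learning (e.g. DPO-style) trainers. The record format and the example are assumptions; ProSec's own data pipeline is not reproduced here.

# Each record pairs an error-inducing coding instruction (derived from a CWE)
# with a vulnerable completion ("rejected") and its security-repaired version ("chosen").
def make_preference_record(instruction, vulnerable_code, fixed_code, cwe_id):
    return {
        "prompt": instruction,
        "rejected": vulnerable_code,  # what the model originally produced
        "chosen": fixed_code,         # the fixed completion to prefer
        "meta": {"cwe": cwe_id},
    }

# Illustrative example only (CWE-798: use of hard-coded credentials).
record = make_preference_record(
    instruction="Write a Python function that connects to the billing database.",
    vulnerable_code='def connect():\n    return db.connect(password="hunter2")',
    fixed_code='import os\n\ndef connect():\n    return db.connect(password=os.environ["DB_PASSWORD"])',
    cwe_id="CWE-798",
)
print(record["meta"], record["prompt"])

-->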
S3TU-Net i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12547v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12547v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12547v1-abstract-full" style="display: none;"> The irregular and challenging characteristics of lung adenocarcinoma nodules in computed tomography (CT) images complicate staging diagnosis, making accurate segmentation critical for clinicians to extract detailed lesion information. In this study, we propose a segmentation model, S3TU-Net, which integrates multi-dimensional spatial connectors and a superpixel-based visual transformer. S3TU-Net is built on a multi-view CNN-Transformer hybrid architecture, incorporating superpixel algorithms, structured weighting, and spatial shifting techniques to achieve superior segmentation performance. The model leverages structured convolution blocks (DWF-Conv/D2BR-Conv) to extract multi-scale local features while mitigating overfitting. To enhance multi-scale feature fusion, we introduce the S2-MLP Link, integrating spatial shifting and attention mechanisms at the skip connections. Additionally, the residual-based superpixel visual transformer (RM-SViT) effectively merges global and local features by employing sparse correlation learning and multi-branch attention to capture long-range dependencies, with residual connections enhancing stability and computational efficiency. Experimental results on the LIDC-IDRI dataset demonstrate that S3TU-Net achieves a DSC, precision, and IoU of 89.04%, 90.73%, and 90.70%, respectively. Compared to recent methods, S3TU-Net improves DSC by 4.52% and sensitivity by 3.16%, with other metrics showing an approximate 2% increase. In addition to comparison and ablation studies, we validated the generalization ability of our model on the EPDB private dataset, achieving a DSC of 86.40%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12547v1-abstract-full').style.display = 'none'; document.getElementById('2411.12547v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12478">arXiv:2411.12478</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12478">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Robotic transcatheter tricuspid valve replacement with hybrid enhanced intelligence: a new paradigm and first-in-vivo study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shuangyi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Haichuan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Y">Yiping Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Ziqi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+D">Dong Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+L">Longyue Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+X">Xilong Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+X">Xiao-Hu Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+S">Shengtao Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+F">Fei Pan</a>, <a href="/search/cs?searchtype=author&amp;query=So%2C+K+C">Kent Chak-Yu So</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+Z">Zeng-Guang Hou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12478v1-abstract-short" style="display: inline;"> Transcatheter tricuspid valve replacement (TTVR) is the latest treatment for tricuspid regurgitation and is in the early stages of clinical adoption. Intelligent robotic approaches are expected to overcome the challenges of surgical manipulation and widespread dissemination, but systems and protocols with high clinical utility have not yet been reported. In this study, we propose a complete soluti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12478v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12478v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12478v1-abstract-full" style="display: none;"> Transcatheter tricuspid valve replacement (TTVR) is the latest treatment for tricuspid regurgitation and is in the early stages of clinical adoption. Intelligent robotic approaches are expected to overcome the challenges of surgical manipulation and widespread dissemination, but systems and protocols with high clinical utility have not yet been reported. In this study, we propose a complete solution that includes a passive stabilizer, robotic drive, detachable delivery catheter and valve manipulation mechanism. Working towards autonomy, a hybrid augmented intelligence approach based on reinforcement learning, Monte Carlo probabilistic maps and human-robot co-piloted control was introduced. Systematic tests in phantom and first-in-vivo animal experiments were performed to verify that the system design met the clinical requirement. 
Furthermore, the experimental results confirmed the advantages of co-piloted control over conventional master-slave control in terms of time efficiency, control efficiency, autonomy and stability of operation. In conclusion, this study provides a comprehensive pathway for robotic TTVR and, to our knowledge, completes the first animal study that not only successfully demonstrates the application of hybrid enhanced intelligence in interventional robotics, but also provides a solution with high application value for a cutting-edge procedure. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12478v1-abstract-full').style.display = 'none'; document.getElementById('2411.12478v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12471">arXiv:2411.12471</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12471">pdf</a>, <a href="https://arxiv.org/format/2411.12471">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SCIGS: 3D Gaussians Splatting from a Snapshot Compressive Image </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zixu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+H">Hao Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+Y">Yu Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+F">Fei Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12471v1-abstract-short" style="display: inline;"> Snapshot Compressive Imaging (SCI) offers a possibility for capturing information in high-speed dynamic scenes, requiring efficient reconstruction method to recover scene information. Despite promising results, current deep learning-based and NeRF-based reconstruction methods face challenges: 1) deep learning-based reconstruction methods struggle to maintain 3D structural consistency within scenes&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12471v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12471v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12471v1-abstract-full" style="display: none;"> Snapshot Compressive Imaging (SCI) offers a possibility for capturing information in high-speed dynamic scenes, requiring efficient reconstruction method to recover scene information. Despite promising results, current deep learning-based and NeRF-based reconstruction methods face challenges: 1) deep learning-based reconstruction methods struggle to maintain 3D structural consistency within scenes, and 2) NeRF-based reconstruction methods still face limitations in handling dynamic scenes. 
To address these challenges, we propose SCIGS, a variant of 3DGS, and develop a primitive-level transformation network that utilizes camera pose stamps and Gaussian primitive coordinates as embedding vectors. This approach resolves the necessity of camera pose in vanilla 3DGS and enhances multi-view 3D structural consistency in dynamic scenes by utilizing transformed primitives. Additionally, a high-frequency filter is introduced to eliminate the artifacts generated during the transformation. The proposed SCIGS is the first to reconstruct a 3D explicit scene from a single compressed image, extending its application to dynamic 3D scenes. Experiments on both static and dynamic scenes demonstrate that SCIGS not only enhances SCI decoding but also outperforms current state-of-the-art methods in reconstructing dynamic 3D scenes from a single compressed image. The code will be made available upon publication. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12471v1-abstract-full').style.display = 'none'; document.getElementById('2411.12471v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12440">arXiv:2411.12440</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12440">pdf</a>, <a href="https://arxiv.org/format/2411.12440">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Beyond Gaussians: Fast and High-Fidelity 3D Splatting with Linear Kernels </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Haodong Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+R">Runnan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Qu%2C+Q">Qiang Qu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhaoqing Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+T">Tongliang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xiaoming Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chung%2C+Y+Y">Yuk Ying Chung</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12440v2-abstract-short" style="display: inline;"> Recent advancements in 3D Gaussian Splatting (3DGS) have substantially improved novel view synthesis, enabling high-quality reconstruction and real-time rendering. However, blurring artifacts, such as floating primitives and over-reconstruction, remain challenging. 
Current methods address these issues by refining scene structure, enhancing geometric representations, addressing blur in training ima&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12440v2-abstract-full').style.display = 'inline'; document.getElementById('2411.12440v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12440v2-abstract-full" style="display: none;"> Recent advancements in 3D Gaussian Splatting (3DGS) have substantially improved novel view synthesis, enabling high-quality reconstruction and real-time rendering. However, blurring artifacts, such as floating primitives and over-reconstruction, remain challenging. Current methods address these issues by refining scene structure, enhancing geometric representations, addressing blur in training images, improving rendering consistency, and optimizing density control, yet the role of kernel design remains underexplored. We identify the soft boundaries of Gaussian ellipsoids as one of the causes of these artifacts, limiting detail capture in high-frequency regions. To bridge this gap, we introduce 3D Linear Splatting (3DLS), which replaces Gaussian kernels with linear kernels to achieve sharper and more precise results, particularly in high-frequency regions. Through evaluations on three datasets, 3DLS demonstrates state-of-the-art fidelity and accuracy, along with a 30% FPS improvement over baseline 3DGS. The implementation will be made publicly available upon acceptance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12440v2-abstract-full').style.display = 'none'; document.getElementById('2411.12440v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
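<!-- A small 1D illustration of the kernel contrast described in the 3D Linear Splatting entry above (arXiv:2411.12440): a Gaussian falloff has a soft boundary and never reaches zero, while a linear (tent) falloff has compact support. The exact kernel parameterization of the paper is not reproduced; this comparison is illustrative only.

import numpy as np

def gaussian_kernel(d, sigma=1.0):
    # Soft boundary: weight decays smoothly and never reaches zero.
    return np.exp(-0.5 * (d / sigma) ** 2)

def linear_kernel(d, radius=1.0):
    # Hard boundary: weight falls off linearly and is exactly zero beyond `radius`.
    return np.clip(1.0 - np.abs(d) / radius, 0.0, None)

for di in np.linspace(-3, 3, 13):
    print(f"d={di:+.1f}  gaussian={gaussian_kernel(di):.3f}  linear={linear_kernel(di):.3f}")

-->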
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12301">arXiv:2411.12301</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12301">pdf</a>, <a href="https://arxiv.org/format/2411.12301">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Physics-Guided Detector for SAR Airplanes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Z">Zhongling Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+L">Long Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+S">Shuxin Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhirui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+G">Gong Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+J">Junwei Han</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12301v1-abstract-short" style="display: inline;"> The disperse structure distributions (discreteness) and variant scattering characteristics (variability) of SAR airplane targets lead to special challenges of object detection and recognition. The current deep learning-based detectors encounter challenges in distinguishing fine-grained SAR airplanes against complex backgrounds. To address it, we propose a novel physics-guided detector (PGD) learni&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12301v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12301v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12301v1-abstract-full" style="display: none;"> The disperse structure distributions (discreteness) and variant scattering characteristics (variability) of SAR airplane targets lead to special challenges of object detection and recognition. The current deep learning-based detectors encounter challenges in distinguishing fine-grained SAR airplanes against complex backgrounds. To address it, we propose a novel physics-guided detector (PGD) learning paradigm for SAR airplanes that comprehensively investigate their discreteness and variability to improve the detection performance. It is a general learning paradigm that can be extended to different existing deep learning-based detectors with &#34;backbone-neck-head&#34; architectures. The main contributions of PGD include the physics-guided self-supervised learning, feature enhancement, and instance perception, denoted as PGSSL, PGFE, and PGIP, respectively. PGSSL aims to construct a self-supervised learning task based on a wide range of SAR airplane targets that encodes the prior knowledge of various discrete structure distributions into the embedded space. Then, PGFE enhances the multi-scale feature representation of a detector, guided by the physics-aware information learned from PGSSL. PGIP is constructed at the detection head to learn the refined and dominant scattering point of each SAR airplane instance, thus alleviating the interference from the complex background. 
We propose two implementations, denoted as PGD and PGD-Lite, and apply them to various existing detectors with different backbones and detection heads. The experiments demonstrate the flexibility and effectiveness of the proposed PGD, which can improve existing detectors on SAR airplane detection with a fine-grained classification task (an improvement of at most 3.1\% mAP), and achieve the state-of-the-art performance (90.7\% mAP) on the SAR-AIRcraft-1.0 dataset. The project is open-source at \url{https://github.com/XAI4SAR/PGD}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12301v1-abstract-full').style.display = 'none'; document.getElementById('2411.12301v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12286">arXiv:2411.12286</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12286">pdf</a>, <a href="https://arxiv.org/format/2411.12286">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> GLOVER: Generalizable Open-Vocabulary Affordance Reasoning for Task-Oriented Grasping </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ma%2C+T">Teli Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zifan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+J">Jiaming Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+M">Mengmeng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+J">Junwei Liang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12286v1-abstract-short" style="display: inline;"> Inferring affordable (i.e., graspable) parts of arbitrary objects based on human specifications is essential for robots advancing toward open-vocabulary manipulation. Current grasp planners, however, are hindered by limited vision-language comprehension and time-consuming 3D radiance modeling, restricting real-time, open-vocabulary interactions with objects. To address these limitations, we propos&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12286v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12286v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12286v1-abstract-full" style="display: none;"> Inferring affordable (i.e., graspable) parts of arbitrary objects based on human specifications is essential for robots advancing toward open-vocabulary manipulation. Current grasp planners, however, are hindered by limited vision-language comprehension and time-consuming 3D radiance modeling, restricting real-time, open-vocabulary interactions with objects. 
To address these limitations, we propose GLOVER, a unified Generalizable Open-Vocabulary Affordance Reasoning framework, which fine-tunes the Large Language Models (LLMs) to predict visual affordance of graspable object parts within RGB feature space. We compile a dataset of over 10,000 images from human-object interactions, annotated with unified visual and linguistic affordance labels, to enable multi-modal fine-tuning. GLOVER inherits world knowledge and common-sense reasoning from LLMs, facilitating more fine-grained object understanding and sophisticated tool-use reasoning. To enable effective real-world deployment, we present Affordance-Aware Grasping Estimation (AGE), a non-parametric grasp planner that aligns the gripper pose with a superquadric surface derived from affordance data. In evaluations across 30 real-world scenes, GLOVER achieves success rates of 86.0% in part identification and 76.3% in grasping, with speeds approximately 330 times faster in affordance reasoning and 40 times faster in grasping pose estimation than the previous state-of-the-art. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12286v1-abstract-full').style.display = 'none'; document.getElementById('2411.12286v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12177">arXiv:2411.12177</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12177">pdf</a>, <a href="https://arxiv.org/format/2411.12177">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Robust 3D Semantic Occupancy Prediction with Calibration-free Spatial Transformation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhuang%2C+Z">Zhuangwei Zhuang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Ziyin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+S">Sitao Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+L">Lizhao Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+H">Hui Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+M">Mingkui Tan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12177v1-abstract-short" style="display: inline;"> 3D semantic occupancy prediction, which seeks to provide accurate and comprehensive representations of environment scenes, is important to autonomous driving systems. For autonomous cars equipped with multi-camera and LiDAR, it is critical to aggregate multi-sensor information into a unified 3D space for accurate and robust predictions. 
Recent methods are mainly built on the 2D-to-3D transformatio&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12177v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12177v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12177v1-abstract-full" style="display: none;"> 3D semantic occupancy prediction, which seeks to provide accurate and comprehensive representations of environment scenes, is important to autonomous driving systems. For autonomous cars equipped with multi-camera and LiDAR, it is critical to aggregate multi-sensor information into a unified 3D space for accurate and robust predictions. Recent methods are mainly built on the 2D-to-3D transformation that relies on sensor calibration to project the 2D image information into the 3D space. These methods, however, suffer from two major limitations: First, they rely on accurate sensor calibration and are sensitive to the calibration noise, which limits their application in real complex environments. Second, the spatial transformation layers are computationally expensive and limit their running on an autonomous vehicle. In this work, we attempt to exploit a Robust and Efficient 3D semantic Occupancy (REO) prediction scheme. To this end, we propose a calibration-free spatial transformation based on vanilla attention to implicitly model the spatial correspondence. In this way, we robustly project the 2D features to a predefined BEV plane without using sensor calibration as input. Then, we introduce 2D and 3D auxiliary training tasks to enhance the discrimination power of 2D backbones on spatial, semantic, and texture features. Last, we propose a query-based prediction scheme to efficiently generate large-scale fine-grained occupancy predictions. By fusing point clouds that provide complementary spatial information, our REO surpasses the existing methods by a large margin on three benchmarks, including OpenOccupancy, Occ3D-nuScenes, and SemanticKITTI Scene Completion. For instance, our REO achieves 19.8$\times$ speedup compared to Co-Occ, with 1.1 improvements in geometry IoU on OpenOccupancy. Our code will be available at https://github.com/ICEORY/REO. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12177v1-abstract-full').style.display = 'none'; document.getElementById('2411.12177v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
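<!-- A minimal PyTorch sketch of the general idea behind the calibration-free transformation in the REO entry above (arXiv:2411.12177): learnable BEV queries cross-attend to flattened image features, with no camera intrinsics or extrinsics as input. The shapes, sizes, and single attention layer are assumptions and far simpler than the paper's model.

import torch
import torch.nn as nn

class CalibrationFreeBEVProjector(nn.Module):
    def __init__(self, dim=128, bev_h=16, bev_w=16, heads=4):
        super().__init__()
        # One learnable query per BEV cell; no sensor calibration is used anywhere.
        self.bev_queries = nn.Parameter(torch.randn(bev_h * bev_w, dim))
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)

    def forward(self, image_feats):
        # image_feats: (batch, num_tokens, dim) flattened multi-camera features.
        b = image_feats.shape[0]
        q = self.bev_queries.unsqueeze(0).expand(b, -1, -1)
        bev, _ = self.attn(q, image_feats, image_feats)
        return bev  # (batch, bev_h * bev_w, dim)

feats = torch.randn(2, 6 * 100, 128)  # e.g. 6 cameras, 100 tokens each
print(CalibrationFreeBEVProjector()(feats).shape)

-->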
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 11 figures, 18 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12175">arXiv:2411.12175</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12175">pdf</a>, <a href="https://arxiv.org/format/2411.12175">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> AsynEIO: Asynchronous Monocular Event-Inertial Odometry Using Gaussian Process Regression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhixiang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xudong Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yizhai Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+F">Fan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Panfeng"> Panfeng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12175v1-abstract-short" style="display: inline;"> Event cameras, when combined with inertial sensors, show significant potential for motion estimation in challenging scenarios, such as high-speed maneuvers and low-light environments. There are many methods for producing such estimations, but most boil down to a synchronous discrete-time fusion problem. However, the asynchronous nature of event cameras and their unique fusion mechanism with inerti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12175v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12175v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12175v1-abstract-full" style="display: none;"> Event cameras, when combined with inertial sensors, show significant potential for motion estimation in challenging scenarios, such as high-speed maneuvers and low-light environments. There are many methods for producing such estimations, but most boil down to a synchronous discrete-time fusion problem. However, the asynchronous nature of event cameras and their unique fusion mechanism with inertial sensors remain underexplored. In this paper, we introduce a monocular event-inertial odometry method called AsynEIO, designed to fuse asynchronous event and inertial data within a unified Gaussian Process (GP) regression framework. Our approach incorporates an event-driven frontend that tracks feature trajectories directly from raw event streams at a high temporal resolution. These tracked feature trajectories, along with various inertial factors, are integrated into the same GP regression framework to enable asynchronous fusion. With deriving analytical residual Jacobians and noise models, our method constructs a factor graph that is iteratively optimized and pruned using a sliding-window optimizer. 
Comparative assessments highlight the performance of different inertial fusion strategies, suggesting optimal choices for varying conditions. Experimental results on both public datasets and our own event-inertial sequences indicate that AsynEIO outperforms existing methods, especially in high-speed and low-illumination scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12175v1-abstract-full').style.display = 'none'; document.getElementById('2411.12175v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to IEEE (2024-11-4)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11925">arXiv:2411.11925</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11925">pdf</a>, <a href="https://arxiv.org/format/2411.11925">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Continuous Speculative Decoding for Autoregressive Image Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zili Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+R">Robert Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+K">Kun Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Q">Qi Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+F">Fei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Xiang%2C+S">Shiming Xiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11925v1-abstract-short" style="display: inline;"> Continuous-valued Autoregressive (AR) image generation models have demonstrated notable superiority over their discrete-token counterparts, showcasing considerable reconstruction quality and higher generation fidelity. However, the computational demands of the autoregressive framework result in significant inference overhead. While speculative decoding has proven effective in accelerating Large La&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11925v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11925v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11925v1-abstract-full" style="display: none;"> Continuous-valued Autoregressive (AR) image generation models have demonstrated notable superiority over their discrete-token counterparts, showcasing considerable reconstruction quality and higher generation fidelity. However, the computational demands of the autoregressive framework result in significant inference overhead. 
While speculative decoding has proven effective in accelerating Large Language Models (LLMs), their adaptation to continuous-valued visual autoregressive models remains unexplored. This work generalizes the speculative decoding algorithm from discrete tokens to continuous space. By analyzing the intrinsic properties of output distribution, we establish a tailored acceptance criterion for the diffusion distributions prevalent in such models. To overcome the inconsistency that occurred in speculative decoding output distributions, we introduce denoising trajectory alignment and token pre-filling methods. Additionally, we identify the hard-to-sample distribution in the rejection phase. To mitigate this issue, we propose a meticulous acceptance-rejection sampling method with a proper upper bound, thereby circumventing complex integration. Experimental results show that our continuous speculative decoding achieves a remarkable $2.33\times$ speed-up on off-the-shelf models while maintaining the output distribution. Codes will be available at https://github.com/MarkXCloud/CSpD <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11925v1-abstract-full').style.display = 'none'; document.getElementById('2411.11925v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11913">arXiv:2411.11913</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11913">pdf</a>, <a href="https://arxiv.org/format/2411.11913">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> On-Board Vision-Language Models for Personalized Autonomous Vehicle Motion Control: System Design and Real-World Validation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cui%2C+C">Can Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zichong Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yupeng Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+J">Juntong Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Park%2C+S">Sung-Yeon Park</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Cong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Y">Yunsheng Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+X">Xu Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+W">Wenqian Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+Y">Yiheng Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Panchal%2C+J">Jitesh Panchal</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Lingxi Li</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yaobin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Ziran Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2411.11913v1-abstract-short" style="display: inline;"> Personalized driving refers to an autonomous vehicle&#39;s ability to adapt its driving behavior or control strategies to match individual users&#39; preferences and driving styles while maintaining safety and comfort standards. However, existing works either fail to capture every individual preference precisely or become computationally inefficient as the user base expands. Vision-Language Models (VLMs)&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11913v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11913v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11913v1-abstract-full" style="display: none;"> Personalized driving refers to an autonomous vehicle&#39;s ability to adapt its driving behavior or control strategies to match individual users&#39; preferences and driving styles while maintaining safety and comfort standards. However, existing works either fail to capture every individual preference precisely or become computationally inefficient as the user base expands. Vision-Language Models (VLMs) offer promising solutions to this front through their natural language understanding and scene reasoning capabilities. In this work, we propose a lightweight yet effective on-board VLM framework that provides low-latency personalized driving performance while maintaining strong reasoning capabilities. Our solution incorporates a Retrieval-Augmented Generation (RAG)-based memory module that enables continuous learning of individual driving preferences through human feedback. Through comprehensive real-world vehicle deployment and experiments, our system has demonstrated the ability to provide safe, comfortable, and personalized driving experiences across various scenarios and significantly reduce takeover rates by up to 76.9%. To the best of our knowledge, this work represents the first end-to-end VLM-based motion control system in real-world autonomous vehicles. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11913v1-abstract-full').style.display = 'none'; document.getElementById('2411.11913v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11793">arXiv:2411.11793</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11793">pdf</a>, <a href="https://arxiv.org/format/2411.11793">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A Potential Game Perspective in Federated Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+K">Kang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Ziqi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zuazua%2C+E">Enrique Zuazua</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11793v1-abstract-short" style="display: inline;"> Federated learning (FL) is an emerging paradigm for training machine learning models across distributed clients. Traditionally, in FL settings, a central server assigns training efforts (or strategies) to clients. However, from a market-oriented perspective, clients may independently choose their training efforts based on rational self-interest. To explore this, we propose a potential game framewo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11793v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11793v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11793v1-abstract-full" style="display: none;"> Federated learning (FL) is an emerging paradigm for training machine learning models across distributed clients. Traditionally, in FL settings, a central server assigns training efforts (or strategies) to clients. However, from a market-oriented perspective, clients may independently choose their training efforts based on rational self-interest. To explore this, we propose a potential game framework where each client&#39;s payoff is determined by their individual efforts and the rewards provided by the server. The rewards are influenced by the collective efforts of all clients and can be modulated through a reward factor. Our study begins by establishing the existence of Nash equilibria (NEs), followed by an investigation of uniqueness in homogeneous settings. We demonstrate a significant improvement in clients&#39; training efforts at a critical reward factor, identifying it as the optimal choice for the server. Furthermore, we prove the convergence of the best-response algorithm to compute NEs for our FL game. Finally, we apply the training efforts derived from specific NEs to a real-world FL scenario, validating the effectiveness of the identified optimal reward factor. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11793v1-abstract-full').style.display = 'none'; document.getElementById('2411.11793v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
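</p> <p class="is-size-7"> For illustration of the game-theoretic setup: in a potential game, repeatedly letting each client play a best response converges to a Nash equilibrium. The sketch below assumes a toy payoff in which each client earns the reward factor times the collective effort and pays a quadratic individual cost; this payoff form, the cost values, and the function names are assumptions for exposition, not the paper's model. </p> <pre><code>
# Illustrative sketch (not from arXiv:2411.11793): best-response dynamics in a
# toy potential game. Each client i picks an effort x_i; its assumed payoff is
# r * sum(x) - c_i * x_i**2 / 2, which admits the exact potential
# Phi(x) = r * sum(x) - sum_i c_i * x_i**2 / 2, so the best response is r / c_i.
import numpy as np

def best_response_dynamics(costs, reward_factor, effort_cap=10.0, iters=50, tol=1e-9):
    efforts = np.zeros_like(costs, dtype=float)
    for _ in range(iters):
        prev = efforts.copy()
        for i in range(len(costs)):
            # Maximize r*(x_i + others) - c_i*x_i**2/2 over x_i in [0, effort_cap].
            efforts[i] = min(reward_factor / costs[i], effort_cap)
        if tol > np.max(np.abs(efforts - prev)):
            break   # no client can improve unilaterally: a Nash equilibrium of the toy game
    return efforts

print(best_response_dynamics(np.array([1.0, 2.0, 4.0]), reward_factor=0.5))
</code></pre> <p class="is-size-7">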
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T01; 90C90; 91A10; 93A16 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11781">arXiv:2411.11781</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11781">pdf</a>, <a href="https://arxiv.org/format/2411.11781">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Towards Scalable and Practical Batch-Dynamic Connectivity </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=De+Man%2C+Q">Quinten De Man</a>, <a href="/search/cs?searchtype=author&amp;query=Dhulipala%2C+L">Laxman Dhulipala</a>, <a href="/search/cs?searchtype=author&amp;query=Karczmarz%2C+A">Adam Karczmarz</a>, <a href="/search/cs?searchtype=author&amp;query=%C5%81%C4%85cki%2C+J">Jakub 艁膮cki</a>, <a href="/search/cs?searchtype=author&amp;query=Shun%2C+J">Julian Shun</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhongqi Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11781v1-abstract-short" style="display: inline;"> We study the problem of dynamically maintaining the connected components of an undirected graph subject to edge insertions and deletions. We give the first parallel algorithm for the problem which is work-efficient, supports batches of updates, runs in polylogarithmic depth, and uses only linear total space. The existing algorithms for the problem either use super-linear space, do not come with st&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11781v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11781v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11781v1-abstract-full" style="display: none;"> We study the problem of dynamically maintaining the connected components of an undirected graph subject to edge insertions and deletions. We give the first parallel algorithm for the problem which is work-efficient, supports batches of updates, runs in polylogarithmic depth, and uses only linear total space. The existing algorithms for the problem either use super-linear space, do not come with strong theoretical bounds, or are not parallel. On the empirical side, we provide the first implementation of the cluster forest algorithm, the first linear-space and poly-logarithmic update time algorithm for dynamic connectivity. Experimentally, we find that our algorithm uses up to 19.7x less space and is up to 6.2x faster than the level-set algorithm of HDT, arguably the most widely-implemented dynamic connectivity algorithm with strong theoretical guarantees. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11781v1-abstract-full').style.display = 'none'; document.getElementById('2411.11781v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This is a preliminary version of a paper that will appear at VLDB&#39;25</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11694">arXiv:2411.11694</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11694">pdf</a>, <a href="https://arxiv.org/format/2411.11694">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Technical Report: Enhancing LLM Reasoning with Reward-guided Tree Search </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+J">Jinhao Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhipeng Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Min%2C+Y">Yingqian Min</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jie Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+X">Xiaoxue Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jiapeng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Y">Yiru Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+H">Haoxiang Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+J">Jia Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+W+X">Wayne Xin Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zheng Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+D">Dong Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+J">Jian Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhongyuan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wen%2C+J">Ji-Rong Wen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11694v1-abstract-short" style="display: inline;"> Recently, test-time scaling has garnered significant attention from the research community, largely due to the substantial advancements of the o1 model released by OpenAI. 
By allocating more computational resources during the inference phase, large language models~(LLMs) can extensively explore the solution space by generating more thought tokens or diverse solutions, thereby producing more accura&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11694v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11694v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11694v1-abstract-full" style="display: none;"> Recently, test-time scaling has garnered significant attention from the research community, largely due to the substantial advancements of the o1 model released by OpenAI. By allocating more computational resources during the inference phase, large language models~(LLMs) can extensively explore the solution space by generating more thought tokens or diverse solutions, thereby producing more accurate responses. However, developing an o1-like reasoning approach is challenging, and researchers have been making various attempts to advance this open area of research. In this paper, we present a preliminary exploration into enhancing the reasoning abilities of LLMs through reward-guided tree search algorithms. This framework is implemented by integrating the policy model, reward model, and search algorithm. It is primarily constructed around a tree search algorithm, where the policy model navigates a dynamically expanding tree guided by a specially trained reward model. We thoroughly explore various design considerations necessary for implementing this framework and provide a detailed report of the technical aspects. To assess the effectiveness of our approach, we focus on mathematical reasoning tasks and conduct extensive evaluations on four challenging datasets, significantly enhancing the reasoning abilities of LLMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11694v1-abstract-full').style.display = 'none'; document.getElementById('2411.11694v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
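</p> <p class="is-size-7"> For illustration of the search loop described above: a reward model can guide which partial solutions the policy model expands next. The sketch below is a minimal best-first variant with placeholder <code>expand</code>, <code>reward</code>, and <code>is_terminal</code> callables; it is an assumption-laden illustration, not the framework from the report. </p> <pre><code>
# Illustrative sketch (not the implementation from arXiv:2411.11694): a minimal
# reward-guided best-first search over partial solutions. `expand` stands in
# for a policy model proposing next reasoning steps and `reward` for a trained
# reward model scoring a partial solution; both are placeholders.
import heapq
from itertools import count

def reward_guided_search(root, expand, reward, is_terminal, budget=64):
    tie = count()                       # tie-breaker so states are never compared
    frontier = [(-reward(root), next(tie), root)]
    best_state, best_score = root, reward(root)
    while frontier and budget > 0:
        neg_r, _, state = heapq.heappop(frontier)
        if -neg_r > best_score:
            best_state, best_score = state, -neg_r
        if is_terminal(state):
            return state                # first terminal popped has the highest reward so far
        for child in expand(state):     # policy model proposes children
            heapq.heappush(frontier, (-reward(child), next(tie), child))
            budget -= 1
    return best_state
</code></pre> <p class="is-size-7">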
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">LLM;Complex Reasoning;Math</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11691">arXiv:2411.11691</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11691">pdf</a>, <a href="https://arxiv.org/format/2411.11691">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Towards Degradation-Robust Reconstruction in Generalizable NeRF </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Park%2C+C+H">Chan Ho Park</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+K+L">Ka Leong Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhicheng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Q">Qifeng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11691v1-abstract-short" style="display: inline;"> Generalizable Neural Radiance Field (GNeRF) across scenes has been proven to be an effective way to avoid per-scene optimization by representing a scene with deep image features of source images. However, despite its potential for real-world applications, there has been limited research on the robustness of GNeRFs to different types of degradation present in the source images. The lack of such res&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11691v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11691v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11691v1-abstract-full" style="display: none;"> Generalizable Neural Radiance Field (GNeRF) across scenes has been proven to be an effective way to avoid per-scene optimization by representing a scene with deep image features of source images. However, despite its potential for real-world applications, there has been limited research on the robustness of GNeRFs to different types of degradation present in the source images. The lack of such research is primarily attributed to the absence of a large-scale dataset fit for training a degradation-robust generalizable NeRF model. To address this gap and facilitate investigations into the degradation robustness of 3D reconstruction tasks, we construct the Objaverse Blur Dataset, comprising 50,000 images from over 1000 settings featuring multiple levels of blur degradation. In addition, we design a simple and model-agnostic module for enhancing the degradation robustness of GNeRFs. Specifically, by extracting 3D-aware features through a lightweight depth estimator and denoiser, the proposed module shows improvement on different popular methods in GNeRFs in terms of both quantitative and visual quality over varying degradation types and levels. Our dataset and code will be made publicly available. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11691v1-abstract-full').style.display = 'none'; document.getElementById('2411.11691v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11624">arXiv:2411.11624</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11624">pdf</a>, <a href="https://arxiv.org/format/2411.11624">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Teapot: Efficiently Uncovering Spectre Gadgets in COTS Binaries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lin%2C+F">Fangzheng Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhongfa Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Sasaki%2C+H">Hiroshi Sasaki</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11624v1-abstract-short" style="display: inline;"> Speculative execution is crucial in enhancing modern processor performance but can introduce Spectre-type vulnerabilities that may leak sensitive information. Detecting Spectre gadgets from programs has been a research focus to enhance the analysis and understanding of Spectre attacks. However, one of the problems of existing approaches is that they rely on the presence of source code (or are impr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11624v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11624v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11624v1-abstract-full" style="display: none;"> Speculative execution is crucial in enhancing modern processor performance but can introduce Spectre-type vulnerabilities that may leak sensitive information. Detecting Spectre gadgets from programs has been a research focus to enhance the analysis and understanding of Spectre attacks. However, one of the problems of existing approaches is that they rely on the presence of source code (or are impractical in terms of run-time performance and gadget detection ability). This paper presents Teapot, the first Spectre gadget scanner that works on COTS binaries with comparable performance to compiler-based alternatives. As its core principle, we introduce Speculation Shadows, a novel approach that separates the binary code for normal execution and speculation simulation in order to improve run-time efficiency. Teapot is based on static binary rewriting. It instruments the program to simulate the effects of speculative execution and also adds integrity checks to detect Spectre gadgets at run time. By leveraging fuzzing, Teapot succeeds in efficiently detecting Spectre gadgets. 
Evaluations show that Teapot outperforms both performance (more than 20x performant) and gadget detection ability than a previously proposed binary-based approach. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11624v1-abstract-full').style.display = 'none'; document.getElementById('2411.11624v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in ACM/IEEE International Symposium on Code Generation and Optimization (CGO) 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11581">arXiv:2411.11581</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11581">pdf</a>, <a href="https://arxiv.org/format/2411.11581">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> OASIS: Open Agents Social Interaction Simulations on One Million Agents </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Ziyi Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zaibin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+Z">Zirui Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Y">Yuxian Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Gan%2C+Z">Ziyue Gan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhiyu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ling%2C+Z">Zijian Ling</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jinsong Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+M">Martz Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+B">Bowen Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+P">Prateek Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+S">Shuyue Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+Z">Zhenfei Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+G">Guohao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+X">Xu Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Lijun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ghanem%2C+B">Bernard Ghanem</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+H">Huchuan Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Ouyang%2C+W">Wanli Ouyang</a>, <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+Y">Yu Qiao</a>, <a href="/search/cs?searchtype=author&amp;query=Torr%2C+P">Philip Torr</a>, <a href="/search/cs?searchtype=author&amp;query=Shao%2C+J">Jing Shao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11581v2-abstract-short" style="display: inline;"> There has been a growing interest in enhancing rule-based agent-based models (ABMs) for social media platforms (i.e., X, Reddit) with more realistic large 
language model (LLM) agents, thereby allowing for a more nuanced study of complex systems. As a result, several LLM-based ABMs have been proposed in the past year. While they hold promise, each simulator is specifically designed to study a parti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11581v2-abstract-full').style.display = 'inline'; document.getElementById('2411.11581v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11581v2-abstract-full" style="display: none;"> There has been a growing interest in enhancing rule-based agent-based models (ABMs) for social media platforms (i.e., X, Reddit) with more realistic large language model (LLM) agents, thereby allowing for a more nuanced study of complex systems. As a result, several LLM-based ABMs have been proposed in the past year. While they hold promise, each simulator is specifically designed to study a particular scenario, making it time-consuming and resource-intensive to explore other phenomena using the same ABM. Additionally, these models simulate only a limited number of agents, whereas real-world social media platforms involve millions of users. To this end, we propose OASIS, a generalizable and scalable social media simulator. OASIS is designed based on real-world social media platforms, incorporating dynamically updated environments (i.e., dynamic social networks and post information), diverse action spaces (i.e., following, commenting), and recommendation systems (i.e., interest-based and hot-score-based). Additionally, OASIS supports large-scale user simulations, capable of modeling up to one million users. With these features, OASIS can be easily extended to different social media platforms to study large-scale group phenomena and behaviors. We replicate various social phenomena, including information spreading, group polarization, and herd effects across X and Reddit platforms. Moreover, we provide observations of social phenomena at different agent group scales. We observe that the larger agent group scale leads to more enhanced group dynamics and more diverse and helpful agents&#39; opinions. These findings demonstrate OASIS&#39;s potential as a powerful tool for studying complex systems in digital environments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11581v2-abstract-full').style.display = 'none'; document.getElementById('2411.11581v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11551">arXiv:2411.11551</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11551">pdf</a>, <a href="https://arxiv.org/format/2411.11551">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Simple But Not Secure: An Empirical Security Analysis of Two-factor Authentication Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+X">Xin Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+D">Du Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+H">Han Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+M">Meiqi Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+Y">Yan Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+W">Wanpeng Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11551v1-abstract-short" style="display: inline;"> To protect users from data breaches and phishing attacks, service providers typically implement two-factor authentication (2FA) to add an extra layer of security against suspicious login attempts. However, since 2FA can sometimes hinder user experience by introducing additional steps, many websites aim to reduce inconvenience by minimizing the frequency of 2FA prompts. One approach to achieve this&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11551v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11551v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11551v1-abstract-full" style="display: none;"> To protect users from data breaches and phishing attacks, service providers typically implement two-factor authentication (2FA) to add an extra layer of security against suspicious login attempts. However, since 2FA can sometimes hinder user experience by introducing additional steps, many websites aim to reduce inconvenience by minimizing the frequency of 2FA prompts. One approach to achieve this is by storing the user&#39;s ``Remember the Device&#39;&#39; preference in a cookie. As a result, users are only prompted for 2FA when this cookie expires or if they log in from a new device. To understand and improve the security of 2FA systems in real-world settings, we propose SE2FA, a vulnerability evaluation framework designed to detect vulnerabilities in 2FA systems. This framework enables us to analyze the security of 407 2FA systems across popular websites from the Tranco Top 10,000 list. Our analysis and evaluation found three zero-day vulnerabilities on three service providers that could allow an attacker to access a victim&#39;s account without possessing the victim&#39;s second authentication factor, thereby bypassing 2FA protections entirely. A further investigation found that these vulnerabilities stem from design choices aimed at simplifying 2FA for users but that unintentionally reduce its security effectiveness. 
We have disclosed these findings to the affected websites and assisted them in mitigating the risks. Based on the insights from this research, we provide practical recommendations for countermeasures to strengthen 2FA security and address these newly identified threats. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11551v1-abstract-full').style.display = 'none'; document.getElementById('2411.11551v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11505">arXiv:2411.11505</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11505">pdf</a>, <a href="https://arxiv.org/format/2411.11505">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LaVin-DiT: Large Vision Diffusion Transformer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhaoqing Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+X">Xiaobo Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+R">Runnan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+D">Dongdong Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Changhu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+M">Mingming Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+T">Tongliang Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11505v1-abstract-short" style="display: inline;"> This paper presents the Large Vision Diffusion Transformer (LaVin-DiT), a scalable and unified foundation model designed to tackle over 20 computer vision tasks in a generative framework. Unlike existing large vision models directly adapted from natural language processing architectures, which rely on less efficient autoregressive techniques and disrupt spatial relationships essential for vision d&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11505v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11505v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11505v1-abstract-full" style="display: none;"> This paper presents the Large Vision Diffusion Transformer (LaVin-DiT), a scalable and unified foundation model designed to tackle over 20 computer vision tasks in a generative framework. Unlike existing large vision models directly adapted from natural language processing architectures, which rely on less efficient autoregressive techniques and disrupt spatial relationships essential for vision data, LaVin-DiT introduces key innovations to optimize generative performance for vision tasks. First, to address the high dimensionality of visual data, we incorporate a spatial-temporal variational autoencoder that encodes data into a continuous latent space. 
Second, for generative modeling, we develop a joint diffusion transformer that progressively produces vision outputs. Third, for unified multi-task training, in-context learning is implemented. Input-target pairs serve as task context, which guides the diffusion transformer to align outputs with specific tasks within the latent space. During inference, a task-specific context set and test data as queries allow LaVin-DiT to generalize across tasks without fine-tuning. Trained on extensive vision datasets, the model is scaled from 0.1B to 3.4B parameters, demonstrating substantial scalability and state-of-the-art performance across diverse vision tasks. This work introduces a novel pathway for large vision foundation models, underscoring the promising potential of diffusion transformers. The code and models will be open-sourced. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11505v1-abstract-full').style.display = 'none'; document.getElementById('2411.11505v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 7 figures, 2 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11424">arXiv:2411.11424</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11424">pdf</a>, <a href="https://arxiv.org/format/2411.11424">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Membership Inference Attack against Long-Context Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zixiong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+G">Gaoyang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chen Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11424v1-abstract-short" style="display: inline;"> Recent advances in Large Language Models (LLMs) have enabled them to overcome their context window limitations, and demonstrate exceptional retrieval and reasoning capacities on longer context. 
Question-answering systems augmented with Long-Context Language Models (LCLMs) can automatically search massive external data and incorporate it into their contexts, enabling faithful predictions and reducin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11424v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11424v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11424v1-abstract-full" style="display: none;"> Recent advances in Large Language Models (LLMs) have enabled them to overcome their context window limitations, and demonstrate exceptional retrieval and reasoning capacities on longer context. Question-answering systems augmented with Long-Context Language Models (LCLMs) can automatically search massive external data and incorporate it into their contexts, enabling faithful predictions and reducing issues such as hallucinations and knowledge staleness. Existing studies targeting LCLMs mainly concentrate on addressing the so-called lost-in-the-middle problem or improving the inference efficiency, leaving their privacy risks largely unexplored. In this paper, we aim to bridge this gap and argue that integrating all information into the long context makes it a repository of sensitive information, which often contains private data such as medical records or personal identities. We further investigate the membership privacy within LCLMs external context, with the aim of determining whether a given document or sequence is included in the LCLMs context. Our basic idea is that if a document lies in the context, it will exhibit a low generation loss or a high degree of semantic similarity to the contents generated by LCLMs. We for the first time propose six membership inference attack (MIA) strategies tailored for LCLMs and conduct extensive experiments on various popular models. Empirical results demonstrate that our attacks can accurately infer membership status in most cases, e.g., 90.66% attack F1-score on Multi-document QA datasets with LongChat-7b-v1.5-32k, highlighting significant risks of membership leakage within LCLMs input contexts. Furthermore, we examine the underlying reasons why LCLMs are susceptible to revealing such membership information. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11424v1-abstract-full').style.display = 'none'; document.getElementById('2411.11424v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
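</p> <p class="is-size-7"> For illustration of the generation-loss signal mentioned in the abstract: a document already present in the context tends to incur a lower loss when the model is asked to reproduce it. The sketch below is a generic loss-threshold test with placeholder model and threshold values; it is an assumed illustration, not the paper's attack code. </p> <pre><code>
# Illustrative sketch (not the attack code from arXiv:2411.11424): a generic
# loss-threshold membership test. Text that is already present in the model's
# long context tends to be generated with lower loss. The model, tokenizer,
# and threshold below are placeholder assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def candidate_loss(model, tokenizer, context, candidate):
    """Average token loss of `candidate` when it is appended to `context`."""
    ctx_ids = tokenizer(context, return_tensors="pt").input_ids
    cand_ids = tokenizer(candidate, return_tensors="pt").input_ids
    input_ids = torch.cat([ctx_ids, cand_ids], dim=1)
    labels = input_ids.clone()
    labels[:, : ctx_ids.shape[1]] = -100   # score only the candidate tokens
    with torch.no_grad():
        loss = model(input_ids, labels=labels).loss
    return loss.item()

def looks_like_member(loss_value, threshold=2.0):
    # A loss below a calibrated threshold is taken as evidence of membership.
    return threshold > loss_value
</code></pre> <p class="is-size-7">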
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11396">arXiv:2411.11396</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11396">pdf</a>, <a href="https://arxiv.org/format/2411.11396">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Stacking Brick by Brick: Aligned Feature Isolation for Incremental Face Forgery Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+J">Jikang Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+Z">Zhiyuan Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Ying Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Hao%2C+L">Li Hao</a>, <a href="/search/cs?searchtype=author&amp;query=Ai%2C+J">Jiaxin Ai</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+Q">Qin Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+C">Chen Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhongyuan Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11396v2-abstract-short" style="display: inline;"> The rapid advancement of face forgery techniques has introduced a growing variety of forgeries. Incremental Face Forgery Detection (IFFD), involving gradually adding new forgery data to fine-tune the previously trained model, has been introduced as a promising strategy to deal with evolving forgery methods. However, a naively trained IFFD model is prone to catastrophic forgetting when new forgerie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11396v2-abstract-full').style.display = 'inline'; document.getElementById('2411.11396v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11396v2-abstract-full" style="display: none;"> The rapid advancement of face forgery techniques has introduced a growing variety of forgeries. Incremental Face Forgery Detection (IFFD), involving gradually adding new forgery data to fine-tune the previously trained model, has been introduced as a promising strategy to deal with evolving forgery methods. However, a naively trained IFFD model is prone to catastrophic forgetting when new forgeries are integrated, as treating all forgeries as a single &#39;&#39;Fake&#34; class in the Real/Fake classification can cause different forgery types overriding one another, thereby resulting in the forgetting of unique characteristics from earlier tasks and limiting the model&#39;s effectiveness in learning forgery specificity and generality. In this paper, we propose to stack the latent feature distributions of previous and new tasks brick by brick, $\textit{i.e.}$, achieving $\textbf{aligned feature isolation}$. In this manner, we aim to preserve learned forgery information and accumulate new knowledge by minimizing distribution overriding, thereby mitigating catastrophic forgetting. To achieve this, we first introduce Sparse Uniform Replay (SUR) to obtain the representative subsets that could be treated as the uniformly sparse versions of the previous global distributions. 
We then propose a Latent-space Incremental Detector (LID) that leverages SUR data to isolate and align distributions. For evaluation, we construct a more advanced and comprehensive benchmark tailored for IFFD. The leading experimental results validate the superiority of our method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11396v2-abstract-full').style.display = 'none'; document.getElementById('2411.11396v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11360">arXiv:2411.11360</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11360">pdf</a>, <a href="https://arxiv.org/format/2411.11360">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> CCExpert: Advancing MLLM Capability in Remote Sensing Change Captioning with Difference-Aware Integration and a Foundational Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhiming Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+M">Mingze Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+S">Sheng Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yanjing Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+B">Baochang Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11360v1-abstract-short" style="display: inline;"> Remote Sensing Image Change Captioning (RSICC) aims to generate natural language descriptions of surface changes between multi-temporal remote sensing images, detailing the categories, locations, and dynamics of changed objects (e.g., additions or disappearances). Many current methods attempt to leverage the long-sequence understanding and reasoning capabilities of multimodal large language models&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11360v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11360v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11360v1-abstract-full" style="display: none;"> Remote Sensing Image Change Captioning (RSICC) aims to generate natural language descriptions of surface changes between multi-temporal remote sensing images, detailing the categories, locations, and dynamics of changed objects (e.g., additions or disappearances). Many current methods attempt to leverage the long-sequence understanding and reasoning capabilities of multimodal large language models (MLLMs) for this task. 
However, without comprehensive data support, these approaches often alter the essential feature transmission pathways of MLLMs, disrupting the intrinsic knowledge within the models and limiting their potential in RSICC. In this paper, we propose a novel model, CCExpert, based on a new, advanced multimodal large model framework. Firstly, we design a difference-aware integration module to capture multi-scale differences between bi-temporal images and incorporate them into the original image context, thereby enhancing the signal-to-noise ratio of differential features. Secondly, we constructed a high-quality, diversified dataset called CC-Foundation, containing 200,000 image pairs and 1.2 million captions, to provide substantial data support for continue pretraining in this domain. Lastly, we employed a three-stage progressive training process to ensure the deep integration of the difference-aware integration module with the pretrained MLLM. CCExpert achieved a notable performance of $S^*_m=81.80$ on the LEVIR-CC benchmark, significantly surpassing previous state-of-the-art methods. The code and part of the dataset will soon be open-sourced at https://github.com/Meize0729/CCExpert. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11360v1-abstract-full').style.display = 'none'; document.getElementById('2411.11360v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11329">arXiv:2411.11329</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11329">pdf</a>, <a href="https://arxiv.org/format/2411.11329">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Color-Oriented Redundancy Reduction in Dataset Distillation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+B">Bowen Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zijian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+Y">Yadan Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Baktashmotlagh%2C+M">Mahsa Baktashmotlagh</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+Y">Yadan Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Z">Zi Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11329v1-abstract-short" style="display: inline;"> Dataset Distillation (DD) is designed to generate condensed representations of extensive image datasets, enhancing training efficiency. Despite recent advances, there remains considerable potential for improvement, particularly in addressing the notable redundancy within the color space of distilled images. 
In this paper, we propose AutoPalette, a framework that minimizes color redundancy at the i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11329v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11329v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11329v1-abstract-full" style="display: none;"> Dataset Distillation (DD) is designed to generate condensed representations of extensive image datasets, enhancing training efficiency. Despite recent advances, there remains considerable potential for improvement, particularly in addressing the notable redundancy within the color space of distilled images. In this paper, we propose AutoPalette, a framework that minimizes color redundancy at the individual image and overall dataset levels, respectively. At the image level, we employ a palette network, a specialized neural network, to dynamically allocate colors from a reduced color space to each pixel. The palette network identifies essential areas in synthetic images for model training and consequently assigns more unique colors to them. At the dataset level, we develop a color-guided initialization strategy to minimize redundancy among images. Representative images with the least replicated color patterns are selected based on the information gain. A comprehensive performance study involving various datasets and evaluation scenarios is conducted, demonstrating the superior performance of our proposed color-aware DD compared to existing DD methods. The code is available at \url{https://github.com/KeViNYuAn0314/AutoPalette}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11329v1-abstract-full').style.display = 'none'; document.getElementById('2411.11329v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">38th Conference on Neural Information Processing Systems (NeurIPS 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11214">arXiv:2411.11214</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11214">pdf</a>, <a href="https://arxiv.org/format/2411.11214">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DeforHMR: Vision Transformer with Deformable Cross-Attention for 3D Human Mesh Recovery </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Heo%2C+J">Jaewoo Heo</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+G">George Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zeyu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yeung-Levy%2C+S">Serena Yeung-Levy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11214v1-abstract-short" style="display: inline;"> Human Mesh Recovery (HMR) is an important yet challenging problem with applications across various domains including motion capture, augmented reality, and biomechanics. Accurately predicting human pose parameters from a single image remains a challenging 3D computer vision task. In this work, we introduce DeforHMR, a novel regression-based monocular HMR framework designed to enhance the predictio&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11214v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11214v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11214v1-abstract-full" style="display: none;"> Human Mesh Recovery (HMR) is an important yet challenging problem with applications across various domains including motion capture, augmented reality, and biomechanics. Accurately predicting human pose parameters from a single image remains a challenging 3D computer vision task. In this work, we introduce DeforHMR, a novel regression-based monocular HMR framework designed to enhance the prediction of human pose parameters using deformable attention transformers. DeforHMR leverages a novel query-agnostic deformable cross-attention mechanism within the transformer decoder to effectively regress the visual features extracted from a frozen pretrained vision transformer (ViT) encoder. The proposed deformable cross-attention mechanism allows the model to attend to relevant spatial features more flexibly and in a data-dependent manner. Equipped with a transformer decoder capable of spatially-nuanced attention, DeforHMR achieves state-of-the-art performance for single-frame regression-based methods on the widely used 3D HMR benchmarks 3DPW and RICH. By pushing the boundary on the field of 3D human mesh recovery through deformable attention, we introduce an new, effective paradigm for decoding local spatial information from large pretrained vision encoders in computer vision. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11214v1-abstract-full').style.display = 'none'; document.getElementById('2411.11214v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 5 figures, 3DV2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11135">arXiv:2411.11135</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11135">pdf</a>, <a href="https://arxiv.org/format/2411.11135">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Oscillation Inversion: Understand the structure of Large Flow Model through the Lens of Inversion Method </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+Y">Yan Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+Z">Zhenxiao Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Cong%2C+X">Xiaoyan Cong</a>, <a href="/search/cs?searchtype=author&amp;query=guo%2C+L">Lanqing guo</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yuehao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+P">Peihao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhangyang Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11135v1-abstract-short" style="display: inline;"> We explore the oscillatory behavior observed in inversion methods applied to large-scale text-to-image diffusion models, with a focus on the &#34;Flux&#34; model. By employing a fixed-point-inspired iterative approach to invert real-world images, we observe that the solution does not achieve convergence, instead oscillating between distinct clusters. Through both toy experiments and real-world diffusion m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11135v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11135v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11135v1-abstract-full" style="display: none;"> We explore the oscillatory behavior observed in inversion methods applied to large-scale text-to-image diffusion models, with a focus on the &#34;Flux&#34; model. By employing a fixed-point-inspired iterative approach to invert real-world images, we observe that the solution does not achieve convergence, instead oscillating between distinct clusters. Through both toy experiments and real-world diffusion models, we demonstrate that these oscillating clusters exhibit notable semantic coherence. We offer theoretical insights, showing that this behavior arises from oscillatory dynamics in rectified flow models. 
Building on this understanding, we introduce a simple and fast distribution transfer technique that facilitates image enhancement, stroke-based recoloring, as well as visual prompt-guided image editing. Furthermore, we provide quantitative results demonstrating the effectiveness of our method for tasks such as image enhancement, makeup transfer, reconstruction quality, and guided sampling quality. Higher-quality examples of videos and images are available at https://yanyanzheng96.github.io/oscillation_inversion/.
Submitted 17 November, 2024; originally announced November 2024.
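A one-dimensional toy of the phenomenon the abstract describes: a fixed-point iteration whose fixed point is unstable settles into a cycle instead of converging. The logistic map below is an assumption chosen purely for illustration; it is not the inversion scheme or the Flux model.

```python
import numpy as np

def fixed_point_iterate(f, x0, n_steps=50):
    """Run x_{k+1} = f(x_k) and return the whole trajectory."""
    xs = [x0]
    for _ in range(n_steps):
        xs.append(f(xs[-1]))
    return np.array(xs)

# Logistic map with r = 3.2: the fixed point is unstable, so the iteration
# settles into a period-2 cycle rather than converging -- a crude analogue of
# oscillation between distinct clusters under fixed-point-style inversion.
traj = fixed_point_iterate(lambda x: 3.2 * x * (1.0 - x), x0=0.3)
print(np.round(traj[-4:], 4))   # alternates between roughly 0.5130 and 0.7995
```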
arXiv:2411.11114 [pdf, other] cs.CR
JailbreakLens: Interpreting Jailbreak Mechanism in the Lens of Representation and Circuit
Authors: Zeqing He, Zhibo Wang, Zhixuan Chu, Huiyu Xu, Rui Zheng, Kui Ren, Chun Chen
Abstract: Despite the outstanding performance of large language models (LLMs) in diverse tasks, they are vulnerable to jailbreak attacks, wherein adversarial prompts are crafted to bypass their security mechanisms and elicit unexpected responses. Although jailbreak attacks are prevalent, the understanding of their underlying mechanisms remains limited. Recent studies have explained typical jailbreaking behavior (e.g., the degree to which the model refuses to respond) of LLMs by analyzing the representation shifts in their latent space caused by jailbreak prompts or by identifying key neurons that contribute to the success of these attacks. However, these studies neither explore diverse jailbreak patterns nor provide a fine-grained explanation linking circuit failures to representational changes, leaving significant gaps in uncovering the jailbreak mechanism. In this paper, we propose JailbreakLens, an interpretation framework that analyzes jailbreak mechanisms from both the representation perspective (which reveals how jailbreaks alter the model's harmfulness perception) and the circuit perspective (which uncovers the causes of these deceptions by identifying key circuits contributing to the vulnerability), tracking their evolution throughout the entire response generation process. We then conduct an in-depth evaluation of jailbreak behavior on four mainstream LLMs under seven jailbreak strategies. Our evaluation finds that jailbreak prompts amplify components that reinforce affirmative responses while suppressing those that produce refusals. Although this manipulation shifts model representations toward safe clusters to deceive the LLM, leading it to provide detailed responses instead of refusals, it still produces abnormal activations that can be caught by circuit analysis.
Submitted 17 November, 2024; originally announced November 2024.
Comments: 18 pages, 10 figures
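A common way to study representation shifts of the kind mentioned in this abstract is a difference-of-means "refusal direction" in activation space. The sketch below is a generic version of that idea with synthetic activations, not the JailbreakLens framework or its circuit analysis; all array shapes and names are assumptions.

```python
import numpy as np

def refusal_direction(h_refuse: np.ndarray, h_comply: np.ndarray) -> np.ndarray:
    """Unit vector separating mean refusal activations from mean compliant ones.

    h_refuse, h_comply: (n_prompts, d_model) hidden states from some layer.
    """
    d = h_refuse.mean(0) - h_comply.mean(0)
    return d / np.linalg.norm(d)

def projection_score(h: np.ndarray, direction: np.ndarray) -> np.ndarray:
    """How strongly each activation points along the refusal direction."""
    return h @ direction

# Synthetic example: jailbreak activations that have drifted toward the "safe" cluster.
rng = np.random.default_rng(1)
h_refuse = rng.normal(1.0, 1.0, size=(100, 64))
h_comply = rng.normal(-1.0, 1.0, size=(100, 64))
d = refusal_direction(h_refuse, h_comply)
jailbreak_h = rng.normal(-0.5, 1.0, size=(10, 64))
print(projection_score(jailbreak_h, d).mean())   # low score: looks "safe" along this axis
```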
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10926">arXiv:2411.10926</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10926">pdf</a>, <a href="https://arxiv.org/format/2411.10926">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TNSE.2024.3498042">10.1109/TNSE.2024.3498042 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Link-identified Routing Architecture in Space </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hefan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhiyuan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+S">Shan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Meng%2C+Q">Qingkai Meng</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+H">Hongbin Luo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10926v1-abstract-short" style="display: inline;"> Low earth orbit (LEO) satellite networks have the potential to provide low-latency communication with global coverage. To unleash this potential, it is crucial to achieve efficient packet delivery. In this paper, we propose a Link-identified Routing (LiR) architecture for LEO satellite networks. The LiR architecture leverages the deterministic neighbor relation of LEO constellations, and identifie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10926v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10926v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10926v1-abstract-full" style="display: none;"> Low earth orbit (LEO) satellite networks have the potential to provide low-latency communication with global coverage. To unleash this potential, it is crucial to achieve efficient packet delivery. In this paper, we propose a Link-identified Routing (LiR) architecture for LEO satellite networks. The LiR architecture leverages the deterministic neighbor relation of LEO constellations, and identifies each inter-satellite link (ISL). Moreover, LiR architecture adopts source-route-style forwarding based on in-packet bloom filter (BF). Each satellite could efficiently encode multiple ISL identifiers via an in-packet BF to specify the end-to-end path for the packets. Due to false positives caused by BF, the more ISLs are encoded at a time, the more redundant forwarding cases emerge. Based on the topology characteristics, we derive the expected forwarding overhead in a closed-form and propose the optimal encoding policy. 
To accommodate link-state changes in LEO satellite networks, we propose an on-demand rerouting scheme and an on-demand detouring scheme to address intermittent ISLs. We also elaborate on how to take advantage of the LiR architecture to achieve seamless handover for ground-satellite links (GSLs). Finally, we conduct extensive numerical experiments and packet-level simulations to verify our analytical results and evaluate the performance of the LiR architecture.
Submitted 16 November, 2024; originally announced November 2024.
Journal ref: IEEE Transactions on Network Science and Engineering, 2025
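To illustrate the in-packet Bloom-filter encoding of ISL identifiers described in this abstract, here is a minimal sketch. The filter size, SHA-256-based hash positions, and string link identifiers are all assumptions for the example; the paper's contribution is precisely the analysis of how many ISLs to encode given the false-positive behaviour, which is not modelled here.

```python
import hashlib

class InPacketBloomFilter:
    """A tiny Bloom filter over inter-satellite link (ISL) identifiers."""
    def __init__(self, n_bits: int = 64, n_hashes: int = 3):
        self.n_bits = n_bits
        self.n_hashes = n_hashes
        self.bits = 0

    def _positions(self, link_id: str):
        # Derive n_hashes bit positions from independent salted hashes.
        for i in range(self.n_hashes):
            h = hashlib.sha256(f"{i}:{link_id}".encode()).digest()
            yield int.from_bytes(h[:8], "big") % self.n_bits

    def add(self, link_id: str) -> None:
        for p in self._positions(link_id):
            self.bits |= 1 << p

    def __contains__(self, link_id: str) -> bool:
        return all((self.bits >> p) & 1 for p in self._positions(link_id))

# Encode an end-to-end path as a set of ISL identifiers carried in the packet header.
bf = InPacketBloomFilter()
for isl in ["sat3->sat4", "sat4->sat9", "sat9->sat10"]:
    bf.add(isl)
print("sat4->sat9" in bf)   # True: forward the packet on this link
print("sat4->sat5" in bf)   # usually False; a spurious True is the false-positive
                            # case behind the redundant forwarding analysed in the paper
```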
arXiv:2411.10687 [pdf, other] cs.HC
EDBooks: AI-Enhanced Interactive Narratives for Programming Education
Authors: Steve Oney, Yue Shen, Fei Wu, Young Suh Hong, Ziang Wang, Yamini Khajekar, Jiacheng Zhang, April Yi Wang
Abstract: Large Language Models (LLMs) have shown the potential to be valuable teaching tools, potentially giving every student a personalized tutor. However, one challenge with using LLMs to learn new concepts is that when learning a topic in an unfamiliar domain, it can be difficult to know what questions to ask. Further, language models do not always encourage "active learning", where students can test and assess their understanding. In this paper, we propose ways to combine large language models with "traditional" learning materials (like e-books) to give readers the benefits of working with LLMs (the ability to ask personally interesting questions and receive personalized answers) together with the benefits of a traditional e-book (having a structure and content that is pedagogically sound). This work shows one way that LLMs have the potential to improve learning materials and make personalized programming education more accessible to a broader audience.
Submitted 15 November, 2024; originally announced November 2024.
Comments: 21 pages

arXiv:2411.10640 [pdf, other] cs.CV cs.CL
BlueLM-V-3B: Algorithm and System Co-Design for Multimodal Large Language Models on Mobile Devices
Authors: Xudong Lu, Yinghao Chen, Cheng Chen, Hui Tan, Boheng Chen, Yina Xie, Rui Hu, Guanxin Tan, Renshou Wu, Yan Hu, Yi Zeng, Lei Wu, Liuyang Bian, Zhaoxiong Wang, Long Liu, Yanzhou Yang, Han Xiao,
href="/search/cs?searchtype=author&amp;query=Zhou%2C+A">Aojun Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Wen%2C+Y">Yafei Wen</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xiaoxin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Ren%2C+S">Shuai Ren</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H">Hongsheng Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10640v1-abstract-short" style="display: inline;"> The emergence and growing popularity of multimodal large language models (MLLMs) have significant potential to enhance various aspects of daily life, from improving communication to facilitating learning and problem-solving. Mobile phones, as essential daily companions, represent the most effective and accessible deployment platform for MLLMs, enabling seamless integration into everyday tasks. How&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10640v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10640v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10640v1-abstract-full" style="display: none;"> The emergence and growing popularity of multimodal large language models (MLLMs) have significant potential to enhance various aspects of daily life, from improving communication to facilitating learning and problem-solving. Mobile phones, as essential daily companions, represent the most effective and accessible deployment platform for MLLMs, enabling seamless integration into everyday tasks. However, deploying MLLMs on mobile phones presents challenges due to limitations in memory size and computational capability, making it difficult to achieve smooth and real-time processing without extensive optimization. In this paper, we present BlueLM-V-3B, an algorithm and system co-design approach specifically tailored for the efficient deployment of MLLMs on mobile platforms. To be specific, we redesign the dynamic resolution scheme adopted by mainstream MLLMs and implement system optimization for hardware-aware deployment to optimize model inference on mobile phones. BlueLM-V-3B boasts the following key highlights: (1) Small Size: BlueLM-V-3B features a language model with 2.7B parameters and a vision encoder with 400M parameters. (2) Fast Speed: BlueLM-V-3B achieves a generation speed of 24.4 token/s on the MediaTek Dimensity 9300 processor with 4-bit LLM weight quantization. (3) Strong Performance: BlueLM-V-3B has attained the highest average score of 66.1 on the OpenCompass benchmark among models with $\leq$ 4B parameters and surpassed a series of models with much larger parameter sizes (e.g., MiniCPM-V-2.6, InternVL2-8B). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10640v1-abstract-full').style.display = 'none'; document.getElementById('2411.10640v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10639">arXiv:2411.10639</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10639">pdf</a>, <a href="https://arxiv.org/format/2411.10639">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MTA: Multimodal Task Alignment for BEV Perception and Captioning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Y">Yunsheng Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Yaman%2C+B">Burhaneddin Yaman</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+X">Xin Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Tao%2C+F">Feng Tao</a>, <a href="/search/cs?searchtype=author&amp;query=Mallik%2C+A">Abhirup Mallik</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Ziran Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ren%2C+L">Liu Ren</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10639v1-abstract-short" style="display: inline;"> Bird&#39;s eye view (BEV)-based 3D perception plays a crucial role in autonomous driving applications. The rise of large language models has spurred interest in BEV-based captioning to understand object behavior in the surrounding environment. However, existing approaches treat perception and captioning as separate tasks, focusing on the performance of only one of the tasks and overlooking the potenti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10639v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10639v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10639v1-abstract-full" style="display: none;"> Bird&#39;s eye view (BEV)-based 3D perception plays a crucial role in autonomous driving applications. The rise of large language models has spurred interest in BEV-based captioning to understand object behavior in the surrounding environment. However, existing approaches treat perception and captioning as separate tasks, focusing on the performance of only one of the tasks and overlooking the potential benefits of multimodal alignment. To bridge this gap between modalities, we introduce MTA, a novel multimodal task alignment framework that boosts both BEV perception and captioning. 
MTA consists of two key components: (1) BEV-Language Alignment (BLA), a contextual learning mechanism that aligns the BEV scene representations with ground-truth language representations, and (2) Detection-Captioning Alignment (DCA), a cross-modal prompting mechanism that aligns detection and captioning outputs. MTA integrates into state-of-the-art baselines during training, adding no extra computational complexity at runtime. Extensive experiments on the nuScenes and TOD3Cap datasets show that MTA significantly outperforms state-of-the-art baselines, achieving a 4.9% improvement in perception and a 9.2% improvement in captioning. These results underscore the effectiveness of unified alignment in reconciling BEV-based perception and captioning.
Submitted 15 November, 2024; originally announced November 2024.
Comments: 10 pages
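For readers unfamiliar with cross-modal alignment objectives, here is a generic InfoNCE-style alignment loss between pooled BEV features and language features. It is not the paper's BLA or DCA formulation; the temperature, shapes, and function name are assumptions chosen for the sketch.

```python
import torch
import torch.nn.functional as F

def alignment_loss(bev_feats: torch.Tensor, text_feats: torch.Tensor) -> torch.Tensor:
    """Symmetric contrastive alignment between paired (B, D) BEV and text features."""
    bev = F.normalize(bev_feats, dim=-1)
    txt = F.normalize(text_feats, dim=-1)
    logits = bev @ txt.t() / 0.07                     # (B, B) similarity matrix
    targets = torch.arange(bev.size(0), device=bev.device)
    return 0.5 * (F.cross_entropy(logits, targets) +
                  F.cross_entropy(logits.t(), targets))

# Toy usage with random paired features.
loss = alignment_loss(torch.randn(8, 256), torch.randn(8, 256))
print(loss.item())
```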
arXiv:2411.10469 [pdf, ps, other] cs.HC cs.CR cs.LG
User-wise Perturbations for User Identity Protection in EEG-Based BCIs
Authors: Xiaoqing Chen, Siyang Li, Yunlu Tu, Ziwei Wang, Dongrui Wu
Abstract: Objective: An electroencephalogram (EEG)-based brain-computer interface (BCI) is a direct communication pathway between the human brain and a computer. Most research so far has focused on making BCIs more accurate, but much less attention has been paid to the ethics of BCIs. Aside from task-specific information, EEG signals also contain rich private information, e.g., user identity, emotion, and disorders, which should be protected. Approach: We show for the first time that adding user-wise perturbations can make identity information in EEG unlearnable. We propose four types of user-wise privacy-preserving perturbations, i.e., random noise, synthetic noise, error-minimization noise, and error-maximization noise. After adding the proposed perturbations to the EEG training data, the user identity information in the data becomes unlearnable, while the BCI task information remains unaffected. Main results: Experiments on six EEG datasets using three neural network classifiers and various traditional machine learning models demonstrated the robustness and practicability of the proposed perturbations. Significance: Our research shows the feasibility of hiding user identity information in EEG data without impacting the primary BCI task information.
Submitted 4 November, 2024; originally announced November 2024.
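A minimal sketch of the simplest of the four perturbation types (user-wise random noise): every trial belonging to the same user receives the same fixed random pattern. The error-minimization and error-maximization variants require an optimisation loop and are not shown; function name, noise scale, and data shapes are assumptions.

```python
import numpy as np

def add_userwise_noise(X: np.ndarray, user_ids: np.ndarray, eps: float = 0.1,
                       seed: int = 0) -> np.ndarray:
    """Add a fixed, user-specific random perturbation to every trial of each user.

    X: (n_trials, n_channels, n_samples) EEG trials; user_ids: (n_trials,).
    """
    rng = np.random.default_rng(seed)
    X_pert = X.copy()
    for uid in np.unique(user_ids):
        noise = eps * rng.standard_normal(X.shape[1:])   # one pattern per user
        X_pert[user_ids == uid] += noise
    return X_pert

# Toy usage: 20 trials, 8 channels, 128 samples, two users.
X = np.random.randn(20, 8, 128)
users = np.repeat([0, 1], 10)
X_protected = add_userwise_noise(X, users)
print(X_protected.shape)
```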
arXiv:2411.10136 [pdf, other] cs.CV
CoSAM: Self-Correcting SAM for Domain Generalization in 2D Medical Image Segmentation
Authors: Yihang Fu, Ziyang Chen, Yiwen Ye, Xingliang Lei, Zhisong Wang, Yong Xia
Abstract: Medical images often exhibit distribution shifts due to variations in imaging protocols and scanners across different medical centers. Domain Generalization (DG) methods aim to train models on source domains that can generalize to unseen target domains. Recently, the Segment Anything Model (SAM) has demonstrated strong generalization capabilities due to its prompt-based design, and has gained significant attention in image segmentation tasks. Existing SAM-based approaches attempt to address the need for manual prompts by introducing prompt generators that automatically generate these prompts. However, we argue that auto-generated prompts may not be sufficiently accurate under distribution shifts, potentially leading to incorrect predictions that still require manual verification and correction by clinicians. To address this challenge, we propose a method for 2D medical image segmentation called Self-Correcting SAM (CoSAM). Our approach begins by generating coarse masks using SAM in a prompt-free manner, providing prior prompts for the subsequent stages and eliminating the need for prompt generators. To automatically refine these coarse masks, we introduce a generalized error decoder that simulates the correction process typically performed by clinicians.
Furthermore, we generate diverse prompts as feedback based on the corrected masks, which are used to iteratively refine the predictions within a self-correcting loop, enhancing the generalization performance of our model. Extensive experiments on two medical image segmentation benchmarks across multiple scenarios demonstrate the superiority of CoSAM over state-of-the-art SAM-based methods.
Submitted 15 November, 2024; originally announced November 2024.

arXiv:2411.09595 [pdf, other] cs.LG cs.AI cs.CL cs.CV
LLaMA-Mesh: Unifying 3D Mesh Generation with Language Models
Authors: Zhengyi Wang, Jonathan Lorraine, Yikai Wang, Hang Su, Jun Zhu, Sanja Fidler, Xiaohui Zeng
Abstract: This work explores expanding the capabilities of large language models (LLMs) pretrained on text to generate 3D meshes within a unified model.
This offers key advantages of (1) leveraging spatial knowledge already embedded in LLMs, derived from textual sources like 3D tutorials, and (2) enabling conversational 3D generation and mesh understanding. A primary challenge is effectively tokenizing 3D mesh data into discrete tokens that LLMs can process seamlessly. To address this, we introduce LLaMA-Mesh, a novel approach that represents the vertex coordinates and face definitions of 3D meshes as plain text, allowing direct integration with LLMs without expanding the vocabulary. We construct a supervised fine-tuning (SFT) dataset enabling pretrained LLMs to (1) generate 3D meshes from text prompts, (2) produce interleaved text and 3D mesh outputs as required, and (3) understand and interpret 3D meshes. Our work is the first to demonstrate that LLMs can be fine-tuned to acquire complex spatial knowledge for 3D mesh generation in a text-based format, effectively unifying the 3D and text modalities. LLaMA-Mesh achieves mesh generation quality on par with models trained from scratch while maintaining strong text generation performance.
Submitted 14 November, 2024; originally announced November 2024.
Comments: See the project website at https://research.nvidia.com/labs/toronto-ai/LLaMA-Mesh/
MSC Class: 68T05; ACM Class: I.3.5; I.2.10; I.2.6
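To make the "meshes as plain text" idea concrete, here is a tiny OBJ-style serializer: vertex coordinates become `v` lines and face definitions become `f` lines of ordinary text that a language model can read token by token. The exact text format, coordinate quantisation, and tokenisation LLaMA-Mesh uses are not reproduced; this is only an assumed, generic rendering.

```python
def mesh_to_text(vertices, faces, precision=2):
    """Serialise a triangle mesh as OBJ-style plain text.

    vertices: list of (x, y, z) tuples; faces: list of (i, j, k) 1-based indices.
    """
    lines = [f"v {x:.{precision}f} {y:.{precision}f} {z:.{precision}f}"
             for x, y, z in vertices]
    lines += [f"f {i} {j} {k}" for i, j, k in faces]
    return "\n".join(lines)

# A single triangle becomes three 'v' lines and one 'f' line of ordinary text.
print(mesh_to_text([(0, 0, 0), (1, 0, 0), (0, 1, 0)], [(1, 2, 3)]))
```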
arXiv:2411.09553 [pdf, other] cs.CV
OOD-SEG: Out-Of-Distribution detection for image SEGmentation with sparse multi-class positive-only annotations
Authors: Junwen Wang, Zhonghao Wang, Oscar MacCormac, Jonathan Shapey, Tom Vercauteren
Abstract: Despite significant advancements, segmentation based on deep neural networks in medical and surgical imaging faces several challenges, two of which we aim to address in this work. First, acquiring complete pixel-level segmentation labels for medical images is time-consuming and requires domain expertise. Second, typical segmentation pipelines cannot detect out-of-distribution (OOD) pixels, leaving them prone to spurious outputs during deployment. In this work, we propose a novel segmentation approach exploiting OOD detection that learns only from sparsely annotated pixels from multiple positive-only classes. These multi-class positive annotations naturally fall within the in-distribution (ID) set. Unlabelled pixels may contain positive classes but also negative ones, including what is typically referred to as "background" in standard segmentation formulations. Here, we forgo the need for background annotation and consider these, together with any other unseen classes, as part of the OOD set. Our framework can integrate, at a pixel level, any OOD detection approach designed for classification tasks. To address the lack of existing OOD datasets and established evaluation metrics for medical image segmentation, we propose a cross-validation strategy that treats held-out labelled classes as OOD. Extensive experiments on both multi-class hyperspectral and RGB surgical imaging datasets demonstrate the robustness and generalisation capability of our proposed framework.
Submitted 17 November, 2024; v1 submitted 14 November, 2024; originally announced November 2024.
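One classification-style OOD score that such a framework could plug in at the pixel level is the maximum softmax probability. The sketch below flags low-confidence pixels as OOD; the threshold, shapes, and function name are assumptions, and the paper's training scheme from sparse positive-only labels is not modelled.

```python
import numpy as np

def pixelwise_ood_mask(logits: np.ndarray, threshold: float = 0.5) -> np.ndarray:
    """Flag pixels whose max softmax probability over the ID classes is below `threshold`.

    logits: (n_classes, H, W) per-pixel class logits. Returns a boolean (H, W) mask.
    """
    e = np.exp(logits - logits.max(axis=0, keepdims=True))
    probs = e / e.sum(axis=0, keepdims=True)
    return probs.max(axis=0) < threshold          # True = treated as OOD pixel

# Toy usage with 4 positive (ID) classes on a 64x64 image.
logits = np.random.randn(4, 64, 64)
ood = pixelwise_ood_mask(logits)
print(ood.mean())                                  # fraction of pixels flagged OOD
```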
arXiv:2411.09286 [pdf, other] cs.LG doi: 10.1109/ICDM54844.2022.00166
A Centralized-Distributed Transfer Model for Cross-Domain Recommendation Based on Multi-Source Heterogeneous Transfer Learning
Authors: Ke Xu, Ziliang Wang, Wei Zheng, Yuhao Ma, Chenglin Wang, Nengxue Jiang, Cai Cao
Abstract: Cross-domain recommendation (CDR) methods are proposed to tackle the sparsity problem in click-through rate (CTR) estimation. Existing CDR methods directly transfer knowledge from the source domains to the target domain and ignore the heterogeneities among domains, including feature dimensional heterogeneity and latent space heterogeneity, which may lead to negative transfer. Besides, most of the existing methods are based on single-source transfer, which cannot simultaneously utilize knowledge from multiple source domains to further improve the model performance in the target domain. In this paper, we propose a centralized-distributed transfer model (CDTM) for CDR based on multi-source heterogeneous transfer learning. To address the issue of feature dimension heterogeneity, we build a dual embedding structure: a domain-specific embedding (DSE) and a global shared embedding (GSE), to model the feature representation in a single domain and the commonalities in the global space, separately.
To address the latent space heterogeneity, a transfer matrix and an attention mechanism are used to map and combine the DSE and GSE adaptively. Extensive offline and online experiments demonstrate the effectiveness of our model.
Submitted 14 November, 2024; originally announced November 2024.
Comments: Published in: 2022 IEEE International Conference on Data Mining (ICDM). (The authors were affiliated with Hangzhou NetEase Cloud Music Technology Co., Ltd.)
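A generic sketch of the dual-embedding combination described in this abstract: a learned transfer matrix maps the global shared embedding into the domain space, and a gate blends it with the domain-specific embedding. This is an assumed, simplified reading of the mechanism, not the CDTM model itself; the gating design and dimensions are illustrative.

```python
import torch
import torch.nn as nn

class DualEmbeddingCombine(nn.Module):
    """Blend a domain-specific embedding (DSE) with a mapped global shared embedding (GSE)."""
    def __init__(self, dim: int):
        super().__init__()
        self.transfer = nn.Linear(dim, dim, bias=False)            # transfer matrix for the GSE
        self.gate = nn.Sequential(nn.Linear(2 * dim, dim), nn.Sigmoid())

    def forward(self, dse: torch.Tensor, gse: torch.Tensor) -> torch.Tensor:
        g_mapped = self.transfer(gse)
        a = self.gate(torch.cat([dse, g_mapped], dim=-1))          # per-feature attention weights
        return a * dse + (1 - a) * g_mapped

# Toy usage on a batch of 16 feature embeddings of width 32.
combine = DualEmbeddingCombine(dim=32)
print(combine(torch.randn(16, 32), torch.randn(16, 32)).shape)     # torch.Size([16, 32])
```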
viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>
