Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 3,428 results for author: <span class="mathjax">Zhang, C</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Zhang%2C+C">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Zhang, C"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Zhang%2C+C&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Zhang, C"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Zhang%2C+C&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Zhang%2C+C&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhang%2C+C&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhang%2C+C&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhang%2C+C&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhang%2C+C&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14053">arXiv:2411.14053</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14053">pdf</a>, <a href="https://arxiv.org/format/2411.14053">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Stereo Anything: Unifying Stereo Matching with Large-Scale Mixed Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Guo%2C+X">Xianda Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chenming Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Youmin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Nie%2C+D">Dujun Nie</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Ruilin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+W">Wenzhao Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Poggi%2C+M">Matteo Poggi</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Long Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14053v1-abstract-short" style="display: inline;"> Stereo matching has been a pivotal component in 3D vision, aiming to find corresponding points between pairs of stereo images to recover depth information. In this work, we introduce StereoAnything, a highly practical solution for robust stereo matching. 
Rather than focusing on a specialized model, our goal is to develop a versatile foundational model capable of handling stereo images across diver&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14053v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14053v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14053v1-abstract-full" style="display: none;"> Stereo matching has been a pivotal component in 3D vision, aiming to find corresponding points between pairs of stereo images to recover depth information. In this work, we introduce StereoAnything, a highly practical solution for robust stereo matching. Rather than focusing on a specialized model, our goal is to develop a versatile foundational model capable of handling stereo images across diverse environments. To this end, we scale up the dataset by collecting labeled stereo images and generating synthetic stereo pairs from unlabeled monocular images. To further enrich the model&#39;s ability to generalize across different conditions, we introduce a novel synthetic dataset that complements existing data by adding variability in baselines, camera angles, and scene types. We extensively evaluate the zero-shot capabilities of our model on five public datasets, showcasing its impressive ability to generalize to new, unseen data. Code will be available at \url{https://github.com/XiandaGuo/OpenStereo}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14053v1-abstract-full').style.display = 'none'; document.getElementById('2411.14053v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Code will be available at \url{https://github.com/XiandaGuo/OpenStereo}</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14049">arXiv:2411.14049</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14049">pdf</a>, <a href="https://arxiv.org/format/2411.14049">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Out-Of-Distribution Detection with Diversification (Provably) </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yao%2C+H">Haiyun Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zongbo Han</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+H">Huazhu Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+X">Xi Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Q">Qinghua Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Changqing Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14049v1-abstract-short" style="display: inline;"> Out-of-distribution (OOD) detection is crucial for ensuring reliable deployment of machine learning models. Recent advancements focus on utilizing easily accessible auxiliary outliers (e.g., data from the web or other datasets) in training. However, we experimentally reveal that these methods still struggle to generalize their detection capabilities to unknown OOD data, due to the limited diversit&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14049v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14049v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14049v1-abstract-full" style="display: none;"> Out-of-distribution (OOD) detection is crucial for ensuring reliable deployment of machine learning models. Recent advancements focus on utilizing easily accessible auxiliary outliers (e.g., data from the web or other datasets) in training. However, we experimentally reveal that these methods still struggle to generalize their detection capabilities to unknown OOD data, due to the limited diversity of the auxiliary outliers collected. Therefore, we thoroughly examine this problem from the generalization perspective and demonstrate that a more diverse set of auxiliary outliers is essential for enhancing the detection capabilities. However, in practice, it is difficult and costly to collect sufficiently diverse auxiliary outlier data. Therefore, we propose a simple yet practical approach with a theoretical guarantee, termed Diversity-induced Mixup for OOD detection (diverseMix), which enhances the diversity of auxiliary outlier set for training in an efficient way. 
Extensive experiments show that diverseMix achieves superior performance on commonly used and recent challenging large-scale benchmarks, which further confirm the importance of the diversity of auxiliary outliers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14049v1-abstract-full').style.display = 'none'; document.getElementById('2411.14049v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14025">arXiv:2411.14025</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14025">pdf</a>, <a href="https://arxiv.org/format/2411.14025">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> RISecure-PUF: Multipurpose PUF-Driven Security Extensions with Lookaside Buffer in RISC-V </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chenghao Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xiaolin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Qin%2C+K">Kailun Qin</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+T">Tengfei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Y">Yipeng Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+T">Tianyi Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+D">Dawu Gu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14025v1-abstract-short" style="display: inline;"> RISC-V&#39;s limited security features hinder its use in confidential computing and heterogeneous platforms. This paper introduces RISecure-PUF, a security extension utilizing existing Physical Unclonable Functions for key generation and secure protocol purposes. A one-way hash function is integrated to ensure provable security against modeling attacks, while a lookaside buffer accelerates batch sampl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14025v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14025v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14025v1-abstract-full" style="display: none;"> RISC-V&#39;s limited security features hinder its use in confidential computing and heterogeneous platforms. This paper introduces RISecure-PUF, a security extension utilizing existing Physical Unclonable Functions for key generation and secure protocol purposes. A one-way hash function is integrated to ensure provable security against modeling attacks, while a lookaside buffer accelerates batch sampling and minimizes reliance on error correction codes. 
Implemented on the Genesys 2 FPGA, RISecure-PUF improves at least $2.72\times$ in batch scenarios with negligible hardware overhead and a maximum performance reduction of $10.7\%$, enabled by reusing the hash function module in integrated environments such as cryptographic engines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14025v1-abstract-full').style.display = 'none'; document.getElementById('2411.14025v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13220">arXiv:2411.13220</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13220">pdf</a>, <a href="https://arxiv.org/ps/2411.13220">ps</a>, <a href="https://arxiv.org/format/2411.13220">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3704857">10.1145/3704857 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> CF-GKAT: Efficient Validation of Control-Flow Transformations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Cheng Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Kapp%C3%A9%2C+T">Tobias Kapp茅</a>, <a href="/search/cs?searchtype=author&amp;query=Narv%C3%A1ez%2C+D+E">David E. Narv谩ez</a>, <a href="/search/cs?searchtype=author&amp;query=Naus%2C+N">Nico Naus</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13220v1-abstract-short" style="display: inline;"> Guarded Kleene Algebra with Tests (GKAT) provides a sound and complete framework to reason about trace equivalence between simple imperative programs. However, there are still several notable limitations. First, GKAT is completely agnostic with respect to the meaning of primitives, to keep equivalence decidable. Second, GKAT excludes non-local control flow such as goto, break, and return. To overc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13220v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13220v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13220v1-abstract-full" style="display: none;"> Guarded Kleene Algebra with Tests (GKAT) provides a sound and complete framework to reason about trace equivalence between simple imperative programs. However, there are still several notable limitations. First, GKAT is completely agnostic with respect to the meaning of primitives, to keep equivalence decidable. Second, GKAT excludes non-local control flow such as goto, break, and return. 
To overcome these limitations, we introduce Control-Flow GKAT (CF-GKAT), a system that allows reasoning about programs that include non-local control flow as well as hardcoded values. CF-GKAT is able to soundly and completely verify trace equivalence of a larger class of programs, while preserving the nearly-linear efficiency of GKAT. This makes CF-GKAT suitable for the verification of control-flow manipulating procedures, such as decompilation and goto-elimination. To demonstrate CF-GKAT&#39;s abilities, we validated the output of several highly non-trivial program transformations, such as Erosa and Hendren&#39;s goto-elimination procedure and the output of Ghidra decompiler. CF-GKAT opens up the application of Kleene Algebra to a wider set of challenges, and provides an important verification tool that can be applied to the field of decompilation and control-flow transformation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13220v1-abstract-full').style.display = 'none'; document.getElementById('2411.13220v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published at POPL 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13112">arXiv:2411.13112</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13112">pdf</a>, <a href="https://arxiv.org/format/2411.13112">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DriveMLLM: A Benchmark for Spatial Understanding with Multimodal Large Language Models in Autonomous Driving </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Guo%2C+X">Xianda Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+R">Ruijun Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+Y">Yiqun Duan</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+Y">Yuhang He</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chenming Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shuai Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Long Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13112v1-abstract-short" style="display: inline;"> Autonomous driving requires a comprehensive understanding of 3D environments to facilitate high-level tasks such as motion prediction, planning, and mapping. In this paper, we introduce DriveMLLM, a benchmark specifically designed to evaluate the spatial understanding capabilities of multimodal large language models (MLLMs) in autonomous driving. 
DriveMLLM includes 2,734 front-facing camera images&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13112v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13112v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13112v1-abstract-full" style="display: none;"> Autonomous driving requires a comprehensive understanding of 3D environments to facilitate high-level tasks such as motion prediction, planning, and mapping. In this paper, we introduce DriveMLLM, a benchmark specifically designed to evaluate the spatial understanding capabilities of multimodal large language models (MLLMs) in autonomous driving. DriveMLLM includes 2,734 front-facing camera images and introduces both absolute and relative spatial reasoning tasks, accompanied by linguistically diverse natural language questions. To measure MLLMs&#39; performance, we propose novel evaluation metrics focusing on spatial understanding. We evaluate several state-of-the-art MLLMs on DriveMLLM, and our results reveal the limitations of current models in understanding complex spatial relationships in driving contexts. We believe these findings underscore the need for more advanced MLLM-based spatial reasoning methods and highlight the potential for DriveMLLM to drive further research in autonomous driving. Code will be available at \url{https://github.com/XiandaGuo/Drive-MLLM}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13112v1-abstract-full').style.display = 'none'; document.getElementById('2411.13112v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Code will be available at \url{https://github.com/XiandaGuo/Drive-MLLM}</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12713">arXiv:2411.12713</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12713">pdf</a>, <a href="https://arxiv.org/format/2411.12713">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> CATCH: Complementary Adaptive Token-level Contrastive Decoding to Mitigate Hallucinations in LVLMs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kan%2C+Z">Zhehan Kan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Ce Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liao%2C+Z">Zihan Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+Y">Yapeng Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+W">Wenming Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+J">Junyuan Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xu Li</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+D">Dongmei Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yaowei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liao%2C+Q">Qingmin Liao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12713v1-abstract-short" style="display: inline;"> Large Vision-Language Model (LVLM) systems have demonstrated impressive vision-language reasoning capabilities but suffer from pervasive and severe hallucination issues, posing significant risks in critical domains such as healthcare and autonomous systems. Despite previous efforts to mitigate hallucinations, a persistent issue remains: visual defect from vision-language misalignment, creating a b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12713v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12713v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12713v1-abstract-full" style="display: none;"> Large Vision-Language Model (LVLM) systems have demonstrated impressive vision-language reasoning capabilities but suffer from pervasive and severe hallucination issues, posing significant risks in critical domains such as healthcare and autonomous systems. Despite previous efforts to mitigate hallucinations, a persistent issue remains: visual defect from vision-language misalignment, creating a bottleneck in visual processing capacity. To address this challenge, we develop Complementary Adaptive Token-level Contrastive Decoding to Mitigate Hallucinations in LVLMs (CATCH), based on the Information Bottleneck theory. 
CATCH introduces Complementary Visual Decoupling (CVD) for visual information separation, Non-Visual Screening (NVS) for hallucination detection, and Adaptive Token-level Contrastive Decoding (ATCD) for hallucination mitigation. CATCH addresses issues related to visual defects that cause diminished fine-grained feature perception and cumulative hallucinations in open-ended scenarios. It is applicable to various visual question-answering tasks without requiring any specific data or prior knowledge, and generalizes robustly to new tasks without additional training, opening new possibilities for advancing LVLM in various challenging applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12713v1-abstract-full').style.display = 'none'; document.getElementById('2411.12713v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12593">arXiv:2411.12593</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12593">pdf</a>, <a href="https://arxiv.org/format/2411.12593">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> AdaCM$^2$: On Understanding Extremely Long-Term Video with Adaptive Cross-Modality Memory Reduction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Man%2C+Y">Yuanbin Man</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Y">Ying Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chengming Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+B">Bingzhe Li</a>, <a href="/search/cs?searchtype=author&amp;query=Niu%2C+W">Wei Niu</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+M">Miao Yin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12593v1-abstract-short" style="display: inline;"> The advancements in large language models (LLMs) have propelled the improvement of video understanding tasks by incorporating LLMs with visual models. However, most existing LLM-based models (e.g., VideoLLaMA, VideoChat) are constrained to processing short-duration videos. Recent attempts to understand long-term videos by extracting and compressing visual features into a fixed memory size. Neverth&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12593v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12593v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12593v1-abstract-full" style="display: none;"> The advancements in large language models (LLMs) have propelled the improvement of video understanding tasks by incorporating LLMs with visual models. 
However, most existing LLM-based models (e.g., VideoLLaMA, VideoChat) are constrained to processing short-duration videos. Recent attempts to understand long-term videos by extracting and compressing visual features into a fixed memory size. Nevertheless, those methods leverage only visual modality to merge video tokens and overlook the correlation between visual and textual queries, leading to difficulties in effectively handling complex question-answering tasks. To address the challenges of long videos and complex prompts, we propose AdaCM$^2$, which, for the first time, introduces an adaptive cross-modality memory reduction approach to video-text alignment in an auto-regressive manner on video streams. Our extensive experiments on various video understanding tasks, such as video captioning, video question answering, and video classification, demonstrate that AdaCM$^2$ achieves state-of-the-art performance across multiple datasets while significantly reducing memory usage. Notably, it achieves a 4.5% improvement across multiple tasks in the LVU dataset with a GPU memory consumption reduction of up to 65%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12593v1-abstract-full').style.display = 'none'; document.getElementById('2411.12593v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12450">arXiv:2411.12450</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12450">pdf</a>, <a href="https://arxiv.org/format/2411.12450">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Frequency-Aware Guidance for Blind Image Restoration via Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+J">Jun Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+Z">Zihang Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+H">Hao Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Cong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ju%2C+Y">Yakun Ju</a>, <a href="/search/cs?searchtype=author&amp;query=Shui%2C+C">Changjian Shui</a>, <a href="/search/cs?searchtype=author&amp;query=Lam%2C+K">Kin-Man Lam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12450v1-abstract-short" style="display: inline;"> Blind image restoration remains a significant challenge in low-level vision tasks. Recently, denoising diffusion models have shown remarkable performance in image synthesis. Guided diffusion models, leveraging the potent generative priors of pre-trained models along with a differential guidance loss, have achieved promising results in blind image restoration. 
However, these models typically consid&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12450v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12450v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12450v1-abstract-full" style="display: none;"> Blind image restoration remains a significant challenge in low-level vision tasks. Recently, denoising diffusion models have shown remarkable performance in image synthesis. Guided diffusion models, leveraging the potent generative priors of pre-trained models along with a differential guidance loss, have achieved promising results in blind image restoration. However, these models typically consider data consistency solely in the spatial domain, often resulting in distorted image content. In this paper, we propose a novel frequency-aware guidance loss that can be integrated into various diffusion models in a plug-and-play manner. Our proposed guidance loss, based on 2D discrete wavelet transform, simultaneously enforces content consistency in both the spatial and frequency domains. Experimental results demonstrate the effectiveness of our method in three blind restoration tasks: blind image deblurring, imaging through turbulence, and blind restoration for multiple degradations. Notably, our method achieves a significant improvement in PSNR score, with a remarkable enhancement of 3.72\,dB in image deblurring. Moreover, our method exhibits superior capability in generating images with rich details and reduced distortion, leading to the best visual quality. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12450v1-abstract-full').style.display = 'none'; document.getElementById('2411.12450v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 6 figures, has been accepted by the ECCV 2024: AIM workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12372">arXiv:2411.12372</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12372">pdf</a>, <a href="https://arxiv.org/format/2411.12372">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> RedPajama: an Open Dataset for Training Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Weber%2C+M">Maurice Weber</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+D">Daniel Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Anthony%2C+Q">Quentin Anthony</a>, <a href="/search/cs?searchtype=author&amp;query=Oren%2C+Y">Yonatan Oren</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+S">Shane Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Alexandrov%2C+A">Anton Alexandrov</a>, <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+X">Xiaozhong Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+H">Huu Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+X">Xiaozhe Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+V">Virginia Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Athiwaratkun%2C+B">Ben Athiwaratkun</a>, <a href="/search/cs?searchtype=author&amp;query=Chalamala%2C+R">Rahul Chalamala</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+K">Kezhen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Ryabinin%2C+M">Max Ryabinin</a>, <a href="/search/cs?searchtype=author&amp;query=Dao%2C+T">Tri Dao</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+P">Percy Liang</a>, <a href="/search/cs?searchtype=author&amp;query=R%C3%A9%2C+C">Christopher R茅</a>, <a href="/search/cs?searchtype=author&amp;query=Rish%2C+I">Irina Rish</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Ce Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12372v1-abstract-short" style="display: inline;"> Large language models are increasingly becoming a cornerstone technology in artificial intelligence, the sciences, and society as a whole, yet the optimal strategies for dataset composition and filtering remain largely elusive. 
Many of the top-performing models lack transparency in their dataset curation and model development processes, posing an obstacle to the development of fully open language&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12372v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12372v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12372v1-abstract-full" style="display: none;"> Large language models are increasingly becoming a cornerstone technology in artificial intelligence, the sciences, and society as a whole, yet the optimal strategies for dataset composition and filtering remain largely elusive. Many of the top-performing models lack transparency in their dataset curation and model development processes, posing an obstacle to the development of fully open language models. In this paper, we identify three core data-related challenges that must be addressed to advance open-source language models. These include (1) transparency in model development, including the data curation process, (2) access to large quantities of high-quality data, and (3) availability of artifacts and metadata for dataset curation and analysis. To address these challenges, we release RedPajama-V1, an open reproduction of the LLaMA training dataset. In addition, we release RedPajama-V2, a massive web-only dataset consisting of raw, unfiltered text data together with quality signals and metadata. Together, the RedPajama datasets comprise over 100 trillion tokens spanning multiple domains and with their quality signals facilitate the filtering of data, aiming to inspire the development of numerous new datasets. To date, these datasets have already been used in the training of strong language models used in production, such as Snowflake Arctic, Salesforce&#39;s XGen and AI2&#39;s OLMo. To provide insight into the quality of RedPajama, we present a series of analyses and ablation studies with decoder-only language models with up to 1.6B parameters. Our findings demonstrate how quality signals for web data can be effectively leveraged to curate high-quality subsets of the dataset, underscoring the potential of RedPajama to advance the development of transparent and high-performing language models at scale. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12372v1-abstract-full').style.display = 'none'; document.getElementById('2411.12372v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">38th Conference on Neural Information Processing Systems (NeurIPS 2024) Track on Datasets and Benchmarks</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11921">arXiv:2411.11921</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11921">pdf</a>, <a href="https://arxiv.org/format/2411.11921">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DeSiRe-GS: 4D Street Gaussians for Static-Dynamic Decomposition and Surface Reconstruction for Urban Driving Scenes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Peng%2C+C">Chensheng Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chengwei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yixiao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+C">Chenfeng Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Y">Yichen Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+W">Wenzhao Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Keutzer%2C+K">Kurt Keutzer</a>, <a href="/search/cs?searchtype=author&amp;query=Tomizuka%2C+M">Masayoshi Tomizuka</a>, <a href="/search/cs?searchtype=author&amp;query=Zhan%2C+W">Wei Zhan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11921v1-abstract-short" style="display: inline;"> We present DeSiRe-GS, a self-supervised gaussian splatting representation, enabling effective static-dynamic decomposition and high-fidelity surface reconstruction in complex driving scenarios. Our approach employs a two-stage optimization pipeline of dynamic street Gaussians. In the first stage, we extract 2D motion masks based on the observation that 3D Gaussian Splatting inherently can reconstr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11921v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11921v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11921v1-abstract-full" style="display: none;"> We present DeSiRe-GS, a self-supervised gaussian splatting representation, enabling effective static-dynamic decomposition and high-fidelity surface reconstruction in complex driving scenarios. Our approach employs a two-stage optimization pipeline of dynamic street Gaussians. In the first stage, we extract 2D motion masks based on the observation that 3D Gaussian Splatting inherently can reconstruct only the static regions in dynamic environments. These extracted 2D motion priors are then mapped into the Gaussian space in a differentiable manner, leveraging an efficient formulation of dynamic Gaussians in the second stage. Combined with the introduced geometric regularizations, our method are able to address the over-fitting issues caused by data sparsity in autonomous driving, reconstructing physically plausible Gaussians that align with object surfaces rather than floating in air. 
Furthermore, we introduce temporal cross-view consistency to ensure coherence across time and viewpoints, resulting in high-quality surface reconstruction. Comprehensive experiments demonstrate the efficiency and effectiveness of DeSiRe-GS, surpassing prior self-supervised arts and achieving accuracy comparable to methods relying on external 3D bounding box annotations. Code is available at \url{https://github.com/chengweialan/DeSiRe-GS} <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11921v1-abstract-full').style.display = 'none'; document.getElementById('2411.11921v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11913">arXiv:2411.11913</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11913">pdf</a>, <a href="https://arxiv.org/format/2411.11913">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> On-Board Vision-Language Models for Personalized Autonomous Vehicle Motion Control: System Design and Real-World Validation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cui%2C+C">Can Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zichong Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yupeng Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+J">Juntong Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Park%2C+S">Sung-Yeon Park</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Cong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Y">Yunsheng Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+X">Xu Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+W">Wenqian Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+Y">Yiheng Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Panchal%2C+J">Jitesh Panchal</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Lingxi Li</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yaobin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Ziran Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11913v1-abstract-short" style="display: inline;"> Personalized driving refers to an autonomous vehicle&#39;s ability to adapt its driving behavior or control strategies to match individual users&#39; preferences and driving styles while maintaining safety and comfort standards. However, existing works either fail to capture every individual preference precisely or become computationally inefficient as the user base expands. 
Vision-Language Models (VLMs)&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11913v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11913v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11913v1-abstract-full" style="display: none;"> Personalized driving refers to an autonomous vehicle&#39;s ability to adapt its driving behavior or control strategies to match individual users&#39; preferences and driving styles while maintaining safety and comfort standards. However, existing works either fail to capture every individual preference precisely or become computationally inefficient as the user base expands. Vision-Language Models (VLMs) offer promising solutions to this front through their natural language understanding and scene reasoning capabilities. In this work, we propose a lightweight yet effective on-board VLM framework that provides low-latency personalized driving performance while maintaining strong reasoning capabilities. Our solution incorporates a Retrieval-Augmented Generation (RAG)-based memory module that enables continuous learning of individual driving preferences through human feedback. Through comprehensive real-world vehicle deployment and experiments, our system has demonstrated the ability to provide safe, comfortable, and personalized driving experiences across various scenarios and significantly reduce takeover rates by up to 76.9%. To the best of our knowledge, this work represents the first end-to-end VLM-based motion control system in real-world autonomous vehicles. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11913v1-abstract-full').style.display = 'none'; document.getElementById('2411.11913v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
arXiv:2411.11636 [pdf, other] (cs.CV, cs.AI)
Title: SP$^3$: Superpixel-propagated pseudo-label learning for weakly semi-supervised medical image segmentation
Authors: Shiman Li, Jiayue Zhao, Shaolei Liu, Xiaokun Dai, Chenxi Zhang, Zhijian Song
Abstract: Deep learning-based medical image segmentation helps assist diagnosis and accelerate the treatment process, while model training usually requires large-scale densely annotated datasets. Weakly semi-supervised medical image segmentation is an essential application because it only requires a small amount of scribbles and a large number of unlabeled data to train the model, which greatly reduces the clinician's effort to fully annotate images. To handle the inadequate supervisory information challenge in weakly semi-supervised segmentation (WSSS), a SuperPixel-Propagated Pseudo-label (SP$^3$) learning method is proposed, using the structural information contained in superpixels as supplemental information. Specifically, the annotation of scribbles is propagated to superpixels, thus obtaining a dense annotation for supervised training. Since the quality of pseudo-labels is limited by the low-quality annotation, the beneficial superpixels selected by dynamic thresholding are used to refine pseudo-labels. Furthermore, aiming to alleviate the negative impact of noise in pseudo-labels, superpixel-level uncertainty is incorporated to guide the pseudo-label supervision for stable learning.
Our method achieves state-of-the-art performance on both tumor and organ segmentation datasets under the WSSS setting, using only 3% of the annotation workload compared to fully supervised methods and attaining approximately 80% Dice score. Additionally, our method outperforms eight weakly and semi-supervised methods under both weakly supervised and semi-supervised settings. Results of extensive experiments validate the effectiveness and annotation efficiency of our weakly semi-supervised segmentation, which can assist clinicians in achieving automated segmentation for organs or tumors quickly and ultimately benefit patients.
Submitted 18 November, 2024; originally announced November 2024.
Comments: 10 pages, 7 figures. Under review.
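As a rough illustration of the scribble-to-superpixel propagation idea (not the SP$^3$ implementation), the sketch below over-segments an image with SLIC and assigns each superpixel the majority scribble label it overlaps. The segmentation parameters and the toy data are assumptions; it requires scikit-image 0.19+ for the `channel_axis` argument.

```python
import numpy as np
from skimage.segmentation import slic

def propagate_scribbles(image: np.ndarray, scribbles: np.ndarray,
                        n_segments: int = 200) -> np.ndarray:
    """Expand sparse scribble labels (0 = unlabeled, 1..K = classes)
    into a dense pseudo-label map via superpixel majority voting."""
    superpixels = slic(image, n_segments=n_segments, compactness=10,
                       start_label=0, channel_axis=None)  # grayscale input
    pseudo = np.zeros_like(scribbles)
    for sp_id in np.unique(superpixels):
        mask = superpixels == sp_id
        labels = scribbles[mask]
        labels = labels[labels > 0]          # keep annotated pixels only
        if labels.size:                      # majority vote inside the superpixel
            pseudo[mask] = np.bincount(labels).argmax()
    return pseudo

# Toy usage: a synthetic two-region grayscale image with two scribbled classes.
img = np.zeros((64, 64)); img[:, 32:] = 1.0
scr = np.zeros((64, 64), dtype=int); scr[10, 5] = 1; scr[50, 60] = 2
print(np.unique(propagate_scribbles(img, scr)))
```

The paper's dynamic thresholding and uncertainty weighting would then decide which of these propagated labels are trusted during training; that part is not sketched here.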
arXiv:2411.10770 [pdf, other] (cs.CR)
Title: Task Offloading for Vehicular Edge Computing Based on Improved Hotstuff under Parking Assistance
Authors: Guoling Liang, Chunhai Li, Feng Zhao, Chuan Zhang, Liehuang Zhu
Abstract: Parked-assisted vehicular edge computing (PVEC) fully leverages the communication and computing resources of parking vehicles, thereby significantly alleviating the pressure on edge servers. However, resource sharing and trading for vehicular task offloading in the PVEC environment usually occur between untrustworthy entities, which compromises the security of data sharing and transactions by vehicles and edge devices. To address these concerns, blockchain is introduced to provide a secure and trustworthy environment for offloading and transactions in PVEC. Nevertheless, due to the mobility of the vehicles, the processes of computation offloading and blockchain transactions are interrupted, which greatly reduces the reliability of the blockchain in the edge computing process. In this paper, we propose a blockchain-based PVEC (BPVEC) offloading framework to enhance the security and reliability of task offloading and transactions. Specifically, a consensus node selection algorithm based on the connected dominating set (CDS) is designed to improve the Hotstuff consensus according to parking time, computing capability, and communication quality, which enhances blockchain reliability in computation offloading and transactions. Meanwhile, a Stackelberg game model, with the roadside units (RSUs) and parking vehicles (PVs) as leaders and the requesting vehicles (RVs) as followers, is utilized to optimize the offloading strategy and pricing. Subsequently, a BPVEC offloading strategy algorithm with the gradient descent method is designed to maximize system revenue. Simulation results show that the proposed BPVEC offloading scheme is secure and reliable while ensuring maximum benefits.
Submitted 16 November, 2024; originally announced November 2024.

arXiv:2411.10680 [pdf, other] (cs.CR)
Title: Two-layer consensus based on master-slave consortium chain data sharing for Internet of Vehicles
Authors: Feng Zhao, Benchang Yang, Chunhai Li, Chuan Zhang, Liehuang Zhu, Guoling Liang
Abstract: Due to insufficient scalability, the existing consortium chain cannot meet the requirements of low latency, high throughput, and high security when applied to Internet of Vehicles (IoV) data sharing. Therefore, we propose a two-layer consensus algorithm based on the master-slave consortium chain: Weighted Raft and Byzantine Fault Tolerance (WRBFT). The intra-group consensus of the WRBFT algorithm adopts weighted Raft, and the best node is selected as the master node to lead the intra-group consensus by comprehensively evaluating the signal-to-noise ratio (SNR), data processing capacity, and storage capacity of the nodes. The inter-group consensus adopts practical Byzantine fault tolerance (PBFT) based on BLS aggregate signatures with nonlinear coefficients to ensure that the inter-group consensus can tolerate 1/3 of Byzantine nodes. At the same time, a verifiable random function (VRF) is used to select the master node of the inter-group consensus to ensure the randomness of the master node. Extensive experimental results show that the proposed WRBFT algorithm reduces delay and improves throughput and system security.
Submitted 15 November, 2024; originally announced November 2024.
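For context on the 1/3 Byzantine-fault bound cited above, the snippet below shows the standard BFT sizing rule (n >= 3f + 1 replicas, quorum of 2f + 1); it is a generic illustration, not part of the WRBFT algorithm itself.

```python
def pbft_sizes(n: int) -> dict:
    """Standard BFT sizing: with n replicas, at most f = (n - 1) // 3
    Byzantine nodes can be tolerated, and a quorum needs 2f + 1 votes."""
    f = (n - 1) // 3
    return {"replicas": n, "max_faulty": f, "quorum": 2 * f + 1}

for n in (4, 7, 10):
    print(pbft_sizes(n))
```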
arXiv:2411.10403 [pdf, other] (eess.IV, cs.CV)
Title: On the Foundation Model for Cardiac MRI Reconstruction
Authors: Chi Zhang, Michael Loecher, Cagan Alkan, Mahmut Yurt, Shreyas S. Vasanawala, Daniel B. Ennis
Abstract: In recent years, machine learning (ML) based reconstruction has been widely investigated and employed in cardiac magnetic resonance (CMR) imaging. ML-based reconstructions can deliver clinically acceptable image quality under substantially accelerated scans. ML-based reconstruction, however, also requires substantial data and computational time to train the neural network, which is often optimized for a fixed acceleration rate or image contrast. In practice, imaging parameters are often tuned to best suit the diagnosis, which may differ from the training data. This can result in degraded image quality, and multiple trained networks are needed to fulfill the clinical demands. In this study, we propose a foundation model that uses adaptive unrolling, channel-shifting, and a Pattern and Contrast-Prompt-UNet (PCP-UNet) to tackle the problem. In particular, the undersampled data goes through a different number of unrolled iterations according to its acceleration rate. Channel-shifting improves reconstructed data quality. The PCP-UNet is equipped with an image contrast and sampling pattern prompt. In vivo CMR experiments were performed using mixed combinations of image contrasts, acceleration rates, and (under)sampling patterns. The proposed foundation model has significantly improved image quality for a wide range of CMR protocols and outperforms the conventional ML-based method.
Submitted 15 November, 2024; originally announced November 2024.
Comments: For MICCAI CMRxRecon Challenge 2024, team CardiAxs.

arXiv:2411.10063 [pdf, other] (cs.AI, cs.CV, cs.LG)
Title: Federated Domain Generalization via Prompt Learning and Aggregation
Authors: Shuai Gong, Chaoran Cui, Chunyun Zhang, Wenna Wang, Xiushan Nie, Lei Zhu
Abstract: Federated domain generalization (FedDG) aims to improve the global model generalization in unseen domains by addressing data heterogeneity under privacy-preserving constraints. A common strategy in existing FedDG studies involves sharing domain-specific knowledge among clients, such as spectrum information, class prototypes, and data styles. However, this knowledge is extracted directly from local client samples, and sharing such sensitive information poses a potential risk of data leakage, which might not fully meet the requirements of FedDG. In this paper, we introduce prompt learning to adapt pre-trained vision-language models (VLMs) in the FedDG scenario, and leverage locally learned prompts as a more secure bridge to facilitate knowledge transfer among clients. Specifically, we propose a novel FedDG framework through Prompt Learning and AggregatioN (PLAN), which comprises two training stages to collaboratively generate local prompts and global prompts at each federated round.
First, each client performs both text and visual prompt learning using their own data, with local prompts indirectly synchronized by regarding the global prompts as a common reference. Second, all domain-specific local prompts are exchanged among clients and selectively aggregated into the global prompts using lightweight attention-based aggregators. The global prompts are finally applied to adapt VLMs to unseen target domains. As our PLAN framework requires training only a limited number of prompts and lightweight aggregators, it offers notable advantages in computational and communication efficiency for FedDG. Extensive experiments demonstrate the superior generalization ability of PLAN across four benchmark datasets.
Submitted 15 November, 2024; originally announced November 2024.
Comments: This work has been submitted to the IEEE for possible publication.
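Attention-based aggregation of client prompts, as described above, can be illustrated with a minimal weighted-average sketch: each local prompt vector is scored against a query vector and combined with softmax weights. This is an assumption-laden toy with random inputs, not the PLAN aggregator.

```python
import numpy as np

def aggregate_prompts(local_prompts: np.ndarray, query: np.ndarray,
                      temperature: float = 1.0) -> np.ndarray:
    """local_prompts: (n_clients, dim) prompt vectors; query: (dim,) aggregator
    query. Returns a softmax-attention weighted global prompt of shape (dim,)."""
    scores = local_prompts @ query / (np.sqrt(local_prompts.shape[1]) * temperature)
    weights = np.exp(scores - scores.max())   # numerically stable softmax
    weights /= weights.sum()
    return weights @ local_prompts

rng = np.random.default_rng(0)
prompts = rng.normal(size=(5, 16))            # five clients, 16-dim prompts
global_prompt = aggregate_prompts(prompts, rng.normal(size=16))
print(global_prompt.shape)                    # (16,)
```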
arXiv:2411.09971 [pdf, other] (cs.CV, cs.RO)
Title: Explanation for Trajectory Planning using Multi-modal Large Language Model for Autonomous Driving
Authors: Shota Yamazaki, Chenyu Zhang, Takuya Nanri, Akio Shigekane, Siyuan Wang, Jo Nishiyama, Tao Chu, Kohei Yokosawa
Abstract: End-to-end style autonomous driving models have been developed recently. These models lack interpretability of the decision-making process from perception to control of the ego vehicle, resulting in anxiety for passengers. To alleviate this, it is effective to build a model which outputs captions describing the future behaviors of the ego vehicle and their reasons. However, the existing approaches generate reasoning text that inadequately reflects the future plans of the ego vehicle, because they train models to output captions using momentary control signals as inputs. In this study, we propose a reasoning model that takes future planning trajectories of the ego vehicle as inputs to address this limitation, together with a newly collected dataset.
Submitted 15 November, 2024; originally announced November 2024.
Comments: Accepted and presented at the ECCV 2024 2nd Workshop on Vision-Centric Autonomous Driving (VCAD) on September 30, 2024. 13 pages, 5 figures.

arXiv:2411.09749 [pdf, other] (cs.LG, cs.CR, cs.CV)
Title: Adversarial Attacks Using Differentiable Rendering: A Survey
Authors: Matthew Hull, Chao Zhang, Zsolt Kira, Duen Horng Chau
Abstract: Differentiable rendering methods have emerged as a promising means for generating photo-realistic and physically plausible adversarial attacks by manipulating 3D objects and scenes that can deceive deep neural networks (DNNs). Recently, differentiable rendering capabilities have evolved significantly into a diverse landscape of libraries, such as Mitsuba and PyTorch3D, and methods like Neural Radiance Fields and 3D Gaussian Splatting for solving inverse rendering problems, which share conceptually similar properties commonly used to attack DNNs, such as back-propagation and optimization. However, the adversarial machine learning research community has not yet fully explored or understood such capabilities for generating attacks. Some key reasons are that researchers often have different attack goals, such as misclassification or misdetection, and use different tasks to accomplish these goals by manipulating different representations in a scene, such as the mesh or texture of an object. This survey adopts a task-oriented unifying framework that systematically summarizes common tasks, such as manipulating textures, altering illumination, and modifying 3D meshes, to exploit vulnerabilities in DNNs. Our framework enables easy comparison of existing works, reveals research gaps, and spotlights exciting future research directions in this rapidly evolving field. By focusing on how these tasks enable attacks on various DNNs such as image classification, facial recognition, object detection, optical flow, and depth estimation, our survey helps researchers and practitioners better understand the vulnerabilities of computer vision systems against photorealistic adversarial attacks that could threaten real-world applications.
Submitted 14 November, 2024; originally announced November 2024.
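The shared back-propagation-and-optimization loop that such attacks exploit can be sketched as projected gradient descent on a scene parameter. In the toy below, the "differentiable renderer plus classifier" is a stand-in linear function, and every name, step size, and bound is a hypothetical placeholder rather than any surveyed method.

```python
import torch

def pgd_attack(theta: torch.Tensor, render_and_classify, target_class: int,
               steps: int = 20, step_size: float = 0.01, eps: float = 0.1) -> torch.Tensor:
    """Minimal targeted PGD sketch: perturb a scene/texture parameter `theta`
    within an L-infinity ball of radius eps so that a differentiable pipeline
    `render_and_classify(theta) -> logits` predicts `target_class`."""
    theta_0 = theta.detach().clone()
    adv = theta_0.clone().requires_grad_(True)
    for _ in range(steps):
        logits = render_and_classify(adv)
        loss = torch.nn.functional.cross_entropy(
            logits.unsqueeze(0), torch.tensor([target_class]))
        loss.backward()
        with torch.no_grad():
            adv -= step_size * adv.grad.sign()        # descend toward the target class
            adv.clamp_(theta_0 - eps, theta_0 + eps)  # stay inside the perturbation set
        adv.grad = None
    return adv.detach()

# Toy stand-in for a differentiable renderer + classifier (3 classes, 16 parameters).
W = torch.randn(3, 16)
toy_pipeline = lambda t: W @ t
adv_theta = pgd_attack(torch.randn(16), toy_pipeline, target_class=2)
print(adv_theta.shape)
```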
arXiv:2411.09449 [pdf, other] (cs.CV)
Title: Image Regeneration: Evaluating Text-to-Image Model via Generating Identical Image with Multimodal Large Language Models
Authors: Chutian Meng, Fan Ma, Jiaxu Miao, Chi Zhang, Yi Yang, Yueting Zhuang
Abstract: Diffusion models have revitalized the image generation domain, playing crucial roles in both academic research and artistic expression. With the emergence of new diffusion models, assessing the performance of text-to-image models has become increasingly important. Current metrics focus on directly matching the input text with the generated image, but due to cross-modal information asymmetry, this leads to unreliable or incomplete assessment results. Motivated by this, we introduce the Image Regeneration task in this study to assess text-to-image models by tasking the T2I model with generating an image according to a reference image. We use GPT4V to bridge the gap between the reference image and the text input for the T2I model, allowing T2I models to understand image content. This evaluation process is simplified, as comparisons between the generated image and the reference image are straightforward. Two regeneration datasets, spanning content-diverse and style-diverse evaluation data, are introduced to evaluate the leading diffusion models currently available. Additionally, we present the ImageRepainter framework to enhance the quality of generated images by improving content comprehension via MLLM-guided iterative generation and revision. Our comprehensive experiments have showcased the effectiveness of this framework in assessing the generative capabilities of models.
By leveraging MLLMs, we have demonstrated that a robust T2I model can produce images more closely resembling the reference image.
Submitted 14 November, 2024; originally announced November 2024.

arXiv:2411.09297 [pdf, other] (cs.CL)
Title: DTELS: Towards Dynamic Granularity of Timeline Summarization
Authors: Chenlong Zhang, Tong Zhou, Pengfei Cao, Zhuoran Jin, Yubo Chen, Kang Liu, Jun Zhao
Abstract: The rapid proliferation of online news has posed significant challenges in tracking the continuous development of news topics. Traditional timeline summarization constructs a chronological summary of the events but often lacks the flexibility to meet diverse granularity needs. To overcome this limitation, we introduce a new paradigm, Dynamic-granularity TimELine Summarization (DTELS), which aims to construct adaptive timelines based on user instructions or requirements.
This paper establishes a comprehensive benchmark for DTELS that includes: (1) an evaluation framework grounded in journalistic standards to assess the timeline quality across four dimensions: Informativeness, Granular Consistency, Factuality, and Coherence; (2) a large-scale, multi-source dataset with multiple granularity timeline annotations based on a consensus process to facilitate authority; (3) extensive experiments and analysis with two proposed solutions based on Large Language Models (LLMs) and existing state-of-the-art TLS methods. The experimental results demonstrate the effectiveness of the LLM-based solutions. However, even the most advanced LLMs struggle to consistently generate timelines that are both informative and granularly consistent, highlighting the challenges of the DTELS task.
Submitted 14 November, 2024; originally announced November 2024.
Comments: Under review.

arXiv:2411.08569 [pdf, other] (cs.CV)
Title: UIFormer: A Unified Transformer-based Framework for Incremental Few-Shot Object Detection and Instance Segmentation
Authors: Chengyuan Zhang, Yilin Zhang, Lei Zhu, Deyin Liu, Lin Wu, Bo Li, Shichao Zhang, Mohammed Bennamoun, Farid Boussaid
Abstract: This paper introduces a novel framework for unified incremental few-shot object detection (iFSOD) and instance segmentation (iFSIS) using the Transformer architecture. Our goal is to create an optimal solution for situations where only a few examples of novel object classes are available, with no access to training data for base or old classes, while maintaining high performance across both base and novel classes. To achieve this, we extend Mask-DINO into a two-stage incremental learning framework. Stage 1 focuses on optimizing the model using the base dataset, while Stage 2 involves fine-tuning the model on novel classes. In addition, we incorporate a classifier selection strategy that assigns appropriate classifiers to the encoder and decoder according to their distinct functions. Empirical evidence indicates that this approach effectively mitigates over-fitting when learning novel classes. Furthermore, we implement knowledge distillation to prevent catastrophic forgetting of base classes. Comprehensive evaluations on the COCO and LVIS datasets for both iFSIS and iFSOD tasks demonstrate that our method significantly outperforms state-of-the-art approaches.
Submitted 13 November, 2024; originally announced November 2024.
Comments: 11 pages, 3 figures.
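Knowledge distillation for preserving base-class behavior, as mentioned above, is typically a temperature-softened KL term between old-model and new-model logits. The sketch below is the generic form of that loss, not UIFormer's exact formulation; shapes and the temperature are illustrative.

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits: torch.Tensor, teacher_logits: torch.Tensor,
                      temperature: float = 2.0) -> torch.Tensor:
    """KL divergence between temperature-softened teacher and student
    distributions, scaled by T^2 as in standard knowledge distillation."""
    t = temperature
    log_p_student = F.log_softmax(student_logits / t, dim=-1)
    p_teacher = F.softmax(teacher_logits / t, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * (t * t)

student = torch.randn(8, 80, requires_grad=True)   # e.g. logits over 80 base classes
teacher = torch.randn(8, 80)                       # frozen old model's logits
print(distillation_loss(student, teacher).item())
```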
arXiv:2411.08509 [pdf, other] (cs.IT, eess.SP)
Title: Sum Rate Maximization for Movable Antenna-Aided Downlink RSMA Systems
Authors: Cixiao Zhang, Size Peng, Yin Xu, Qingqing Wu, Xiaowu Ou, Xinghao Guo, Dazhi He, Wenjun Zhang
Abstract: Rate splitting multiple access (RSMA) is regarded as a crucial and powerful physical layer (PHY) paradigm for next-generation communication systems. In particular, users employ successive interference cancellation (SIC) to decode part of the interference while treating the remainder as noise. However, conventional RSMA systems rely on fixed-position antenna arrays, limiting their ability to fully exploit spatial diversity. This constraint reduces beamforming gain and significantly impairs RSMA performance. To address this problem, we propose a movable antenna (MA)-aided RSMA scheme that allows the antennas at the base station (BS) to dynamically adjust their positions. Our objective is to maximize the system sum rate of common and private messages by jointly optimizing the MA positions, the beamforming matrix, and the common rate allocation. To tackle the formulated non-convex problem, we apply fractional programming (FP) and develop an efficient two-stage, coarse-to-fine-grained searching (CFGS) algorithm to obtain high-quality solutions.
Numerical results demonstrate that, with optimized antenna adjustments, the MA-enabled system achieves substantial performance and reliability improvements in RSMA over fixed-position antenna setups.
Submitted 14 November, 2024; v1 submitted 13 November, 2024; originally announced November 2024.

arXiv:2411.08217 [pdf, other] (cs.HC)
Title: WristSonic: Enabling Fine-grained Hand-Face Interactions on Smartwatches Using Active Acoustic Sensing
Authors: Saif Mahmud, Kian Mahmoodi, Chi-Jung Lee, Francois Guimbretiere, Cheng Zhang
Abstract: Hand-face interactions play a key role in many everyday tasks, providing insights into user habits, behaviors, intentions, and expressions. However, existing wearable sensing systems often struggle to track these interactions in daily settings due to their reliance on multiple sensors or privacy-sensitive, vision-based approaches. To address these challenges, we propose WristSonic, a wrist-worn active acoustic sensing system that uses speakers and microphones to capture ultrasonic reflections from hand, arm, and face movements, enabling fine-grained detection of hand-face interactions with minimal intrusion.
By transmitting and analyzing ultrasonic waves, WristSonic distinguishes a wide range of gestures, such as tapping the temple, brushing teeth, and nodding, using a Transformer-based neural network architecture. This approach achieves robust recognition of 21 distinct actions with a single, low-power, privacy-conscious wearable. Through two user studies with 15 participants in controlled and semi-in-the-wild settings, WristSonic demonstrates high efficacy, achieving macro F1-scores of 93.08% and 82.65%, respectively.
Submitted 12 November, 2024; originally announced November 2024.
Comments: 21 pages, 10 figures. Under review.
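Macro F1, the metric quoted in this abstract, averages per-class F1 scores with equal weight per class, so rare gestures count as much as frequent ones. A minimal computation, shown here purely as an illustration with made-up labels, looks like this.

```python
from sklearn.metrics import f1_score

# Toy ground-truth and predicted labels over three gesture classes.
y_true = [0, 0, 1, 1, 2, 2, 2, 1]
y_pred = [0, 1, 1, 1, 2, 0, 2, 1]

# average='macro' computes F1 per class and then takes the unweighted mean.
print(f"macro F1 = {f1_score(y_true, y_pred, average='macro'):.3f}")
```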
arXiv:2411.07690 [pdf] (cs.AI)
Title: World Models: The Safety Perspective
Authors: Zifan Zeng, Chongzhe Zhang, Feng Liu, Joseph Sifakis, Qunli Zhang, Shiming Liu, Peng Wang
Abstract: With the proliferation of Large Language Models (LLMs), the concept of World Models (WM) has recently attracted a great deal of attention in the AI research community, especially in the context of AI agents. It is arguably evolving into an essential foundation for building AI agent systems. A WM is intended to help the agent predict the future evolution of environmental states or help the agent fill in missing information so that it can plan its actions and behave safely. The safety property of WMs plays a key role in their effective use in critical applications. In this work, we review and analyze the impacts of the current state-of-the-art in WM technology from the point of view of trustworthiness and safety, based on a comprehensive survey and the fields of application envisaged. We provide an in-depth analysis of state-of-the-art WMs and derive technical research challenges and their impact, in order to call on the research community to collaborate on improving the safety and trustworthiness of WMs.
Submitted 12 November, 2024; originally announced November 2024.
Comments: 8 pages, 3 figures; accepted at the International Workshop on Dependability Modeling and Design (WDMD) during the IEEE International Symposium on Software Reliability Engineering (ISSRE).

arXiv:2411.07685 [pdf, other] (cs.CV, cs.AI)
Title: Fast Disentangled Slim Tensor Learning for Multi-view Clustering
Authors: Deng Xu, Chao Zhang, Zechao Li, Chunlin Chen, Huaxiong Li
Abstract: Tensor-based multi-view clustering has recently received significant attention due to its exceptional ability to explore cross-view high-order correlations. However, most existing methods still encounter some limitations. (1) Most of them explore the correlations among different affinity matrices, making them unscalable to large-scale data. (2) Although some methods address this by introducing bipartite graphs, they may result in sub-optimal solutions caused by an unstable anchor selection process. (3) They generally ignore the negative impact of latent semantic-unrelated information in each view. To tackle these issues, we propose a new approach termed fast Disentangled Slim Tensor Learning (DSTL) for multi-view clustering. Instead of focusing on the multi-view graph structures, DSTL directly explores the high-order correlations among multi-view latent semantic representations based on matrix factorization. To alleviate the negative influence of feature redundancy, inspired by robust PCA, DSTL disentangles the latent low-dimensional representation into a semantic-unrelated part and a semantic-related part for each view. Subsequently, two slim tensors are constructed with tensor-based regularization. To further enhance the quality of feature disentanglement, the semantic-related representations are aligned across views through a consensus alignment indicator. Our proposed model is computationally efficient and can be solved effectively. Extensive experiments demonstrate the superiority and efficiency of DSTL over state-of-the-art approaches. The code of DSTL is available at https://github.com/dengxu-nju/DSTL.
Submitted 12 November, 2024; originally announced November 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages,6 figures, will be published to IEEE TMM</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07268">arXiv:2411.07268</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07268">pdf</a>, <a href="https://arxiv.org/format/2411.07268">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.3233/FAIA240685">10.3233/FAIA240685 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Target-driven Attack for Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+M">Mingyu Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Shu%2C+D">Dong Shu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+T">Taowen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+D">Dongfang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+X">Xiaobo Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07268v2-abstract-short" style="display: inline;"> Current large language models (LLM) provide a strong foundation for large-scale user-oriented natural language tasks. Many users can easily inject adversarial text or instructions through the user interface, thus causing LLM model security challenges like the language model not giving the correct answer. Although there is currently a large amount of research on black-box attacks, most of these bla&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07268v2-abstract-full').style.display = 'inline'; document.getElementById('2411.07268v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07268v2-abstract-full" style="display: none;"> Current large language models (LLM) provide a strong foundation for large-scale user-oriented natural language tasks. Many users can easily inject adversarial text or instructions through the user interface, thus causing LLM model security challenges like the language model not giving the correct answer. Although there is currently a large amount of research on black-box attacks, most of these black-box attacks use random and heuristic strategies. It is unclear how these strategies relate to the success rate of attacks and thus effectively improve model robustness. To solve this problem, we propose our target-driven black-box attack method to maximize the KL divergence between the conditional probabilities of the clean text and the attack text to redefine the attack&#39;s goal. 
We transform the distance maximization problem into two convex optimization problems based on the attack goal to solve the attack text and estimate the covariance. Furthermore, the projected gradient descent algorithm solves the vector corresponding to the attack text. Our target-driven black-box attack approach includes two attack strategies: token manipulation and misinformation attack. Experimental results on multiple Large Language Models and datasets demonstrate the effectiveness of our attack method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07268v2-abstract-full').style.display = 'none'; document.getElementById('2411.07268v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 7 figures. This work is an extension of the arXiv:2404.07234 work. We propose new methods. 27th European Conference on Artificial Intelligence 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06991">arXiv:2411.06991</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06991">pdf</a>, <a href="https://arxiv.org/format/2411.06991">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SIESEF-FusionNet: Spatial Inter-correlation Enhancement and Spatially-Embedded Feature Fusion Network for LiDAR Point Cloud Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jiale Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+F">Fei Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Mao%2C+J">Jianliang Mao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoping Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chuanlin Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06991v1-abstract-short" style="display: inline;"> The ambiguity at the boundaries of different semantic classes in point cloud semantic segmentation often leads to incorrect decisions in intelligent perception systems, such as autonomous driving. Hence, accurate delineation of the boundaries is crucial for improving safety in autonomous driving. 
A novel spatial inter-correlation enhancement and spatially-embedded feature fusion network (SIESEF-Fu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06991v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06991v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06991v1-abstract-full" style="display: none;"> The ambiguity at the boundaries of different semantic classes in point cloud semantic segmentation often leads to incorrect decisions in intelligent perception systems, such as autonomous driving. Hence, accurate delineation of the boundaries is crucial for improving safety in autonomous driving. A novel spatial inter-correlation enhancement and spatially-embedded feature fusion network (SIESEF-FusionNet) is proposed in this paper, enhancing spatial inter-correlation by combining inverse distance weighting and angular compensation to extract more beneficial spatial information without causing redundancy. Meanwhile, a new spatial adaptive pooling module is also designed, embedding enhanced spatial information into semantic features for strengthening the context-awareness of semantic features. Experimental results demonstrate that 83.7% mIoU and 97.8% OA are achieved by SIESEF-FusionNet on the Toronto3D dataset, with performance superior to other baseline methods. A value of 61.1% mIoU is reached on the semanticKITTI dataset, where a marked improvement in segmentation performance is observed. In addition, the effectiveness and plug-and-play capability of the proposed modules are further verified through ablation studies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06991v1-abstract-full').style.display = 'none'; document.getElementById('2411.06991v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
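<p class="is-size-7">Inverse distance weighting, mentioned in the abstract above, is a standard neighbourhood-aggregation operation; a generic sketch is given below. It is not the SIESEF-FusionNet module (which additionally applies angular compensation and learned components), and the brute-force neighbour search and feature sizes are illustrative assumptions only.</p> <pre><code class="language-python">
# Generic inverse-distance-weighted aggregation of point-cloud features.
import numpy as np

def idw_aggregate(xyz, feats, k=8, eps=1e-8):
    """xyz: (N, 3) point coordinates; feats: (N, C) per-point features."""
    # Pairwise distances (fine for small N; use a KD-tree for real point clouds).
    d = np.linalg.norm(xyz[:, None, :] - xyz[None, :, :], axis=-1)
    np.fill_diagonal(d, np.inf)                  # exclude the point itself
    idx = np.argsort(d, axis=1)[:, :k]           # k nearest neighbours per point
    nd = np.take_along_axis(d, idx, axis=1)      # (N, k) neighbour distances
    w = 1.0 / (nd + eps)                         # closer neighbours weigh more
    w /= w.sum(axis=1, keepdims=True)            # normalise weights per point
    return (feats[idx] * w[..., None]).sum(axis=1)   # (N, C) aggregated features

xyz = np.random.rand(1000, 3)
feats = np.random.rand(1000, 32)
print(idw_aggregate(xyz, feats).shape)           # (1000, 32)
</code></pre>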
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06070">arXiv:2411.06070</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06070">pdf</a>, <a href="https://arxiv.org/format/2411.06070">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> GFT: Graph Foundation Model with Transferable Tree Vocabulary </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zehong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zheyuan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chawla%2C+N+V">Nitesh V Chawla</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chuxu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+Y">Yanfang Ye</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06070v1-abstract-short" style="display: inline;"> Inspired by the success of foundation models in applications such as ChatGPT, as graph data has been ubiquitous, one can envision the far-reaching impacts that can be brought by Graph Foundation Models (GFMs) with broader applications in the areas such as scientific research, social network analysis, drug discovery, and e-commerce. Despite the significant progress of pre-trained graph neural netwo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06070v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06070v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06070v1-abstract-full" style="display: none;"> Inspired by the success of foundation models in applications such as ChatGPT, as graph data has been ubiquitous, one can envision the far-reaching impacts that can be brought by Graph Foundation Models (GFMs) with broader applications in the areas such as scientific research, social network analysis, drug discovery, and e-commerce. Despite the significant progress of pre-trained graph neural networks, there haven&#39;t been GFMs that can achieve desired performance on various graph-learning-related tasks. Building GFMs may rely on a vocabulary that encodes transferable patterns shared among different tasks and domains. Unlike image and text, defining such transferable patterns for graphs remains an open question. In this paper, we aim to bridge this gap by rethinking the transferable patterns on graphs as computation trees -- i.e., tree structures derived from the message-passing process. Based on this insight, we propose a cross-task, cross-domain graph foundation model named GFT, short for Graph Foundation model with transferable Tree vocabulary. 
By treating computation trees as tokens within the transferable vocabulary, GFT improves model generalization and reduces the risk of negative transfer. The theoretical analyses and extensive experimental studies have demonstrated the transferability of computation trees and shown the effectiveness of GFT across diverse tasks and domains in graph learning. The open source code and data are available at https://github.com/Zehong-Wang/GFT. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06070v1-abstract-full').style.display = 'none'; document.getElementById('2411.06070v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by NeurIPS 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05826">arXiv:2411.05826</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05826">pdf</a>, <a href="https://arxiv.org/ps/2411.05826">ps</a>, <a href="https://arxiv.org/format/2411.05826">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> From Pixels to Prose: Advancing Multi-Modal Language Models for Remote Sensing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xintian Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+B">Benji Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Charles Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+F">Fei Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Niu%2C+Q">Qian Niu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Junyu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+K">Keyu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Ming Li</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+P">Pohsun Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Bi%2C+Z">Ziqian Bi</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+M">Ming Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yichao Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05826v1-abstract-short" style="display: inline;"> Remote sensing has evolved from simple image acquisition to complex systems capable of integrating and processing visual and textual data. This review examines the development and application of multi-modal language models (MLLMs) in remote sensing, focusing on their ability to interpret and describe satellite imagery using natural language. 
We cover the technical underpinnings of MLLMs, including&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05826v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05826v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05826v1-abstract-full" style="display: none;"> Remote sensing has evolved from simple image acquisition to complex systems capable of integrating and processing visual and textual data. This review examines the development and application of multi-modal language models (MLLMs) in remote sensing, focusing on their ability to interpret and describe satellite imagery using natural language. We cover the technical underpinnings of MLLMs, including dual-encoder architectures, Transformer models, self-supervised and contrastive learning, and cross-modal integration. The unique challenges of remote sensing data--varying spatial resolutions, spectral richness, and temporal changes--are analyzed for their impact on MLLM performance. Key applications such as scene description, object detection, change detection, text-to-image retrieval, image-to-text generation, and visual question answering are discussed to demonstrate their relevance in environmental monitoring, urban planning, and disaster response. We review significant datasets and resources supporting the training and evaluation of these models. Challenges related to computational demands, scalability, data quality, and domain adaptation are highlighted. We conclude by proposing future research directions and technological advancements to further enhance MLLM utility in remote sensing. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05826v1-abstract-full').style.display = 'none'; document.getElementById('2411.05826v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 1 figure</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05783">arXiv:2411.05783</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05783">pdf</a>, <a href="https://arxiv.org/format/2411.05783">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> ASL STEM Wiki: Dataset and Benchmark for Interpreting STEM Articles </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yin%2C+K">Kayo Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+C">Chinmay Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Minakov%2C+F+O">Fyodor O. Minakov</a>, <a href="/search/cs?searchtype=author&amp;query=Milan%2C+V">Vanessa Milan</a>, <a href="/search/cs?searchtype=author&amp;query=Daum%C3%A9%2C+H">Hal Daum茅 III</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Cyril Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+A+X">Alex X. Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Bragg%2C+D">Danielle Bragg</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05783v1-abstract-short" style="display: inline;"> Deaf and hard-of-hearing (DHH) students face significant barriers in accessing science, technology, engineering, and mathematics (STEM) education, notably due to the scarcity of STEM resources in signed languages. To help address this, we introduce ASL STEM Wiki: a parallel corpus of 254 Wikipedia articles on STEM topics in English, interpreted into over 300 hours of American Sign Language (ASL).&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05783v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05783v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05783v1-abstract-full" style="display: none;"> Deaf and hard-of-hearing (DHH) students face significant barriers in accessing science, technology, engineering, and mathematics (STEM) education, notably due to the scarcity of STEM resources in signed languages. To help address this, we introduce ASL STEM Wiki: a parallel corpus of 254 Wikipedia articles on STEM topics in English, interpreted into over 300 hours of American Sign Language (ASL). ASL STEM Wiki is the first continuous signing dataset focused on STEM, facilitating the development of AI resources for STEM education in ASL. We identify several use cases of ASL STEM Wiki with human-centered applications. 
For example, because this dataset highlights the frequent use of fingerspelling for technical concepts, which inhibits DHH students&#39; ability to learn, we develop models to identify fingerspelled words -- which can later be used to query for appropriate ASL signs to suggest to interpreters. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05783v1-abstract-full').style.display = 'none'; document.getElementById('2411.05783v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to EMNLP 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05676">arXiv:2411.05676</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05676">pdf</a>, <a href="https://arxiv.org/format/2411.05676">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Improving Molecular Graph Generation with Flow Matching and Optimal Transport </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hou%2C+X">Xiaoyang Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+T">Tian Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Ren%2C+M">Milong Ren</a>, <a href="/search/cs?searchtype=author&amp;query=Bu%2C+D">Dongbo Bu</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+X">Xin Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chunming Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+S">Shiwei Sun</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05676v1-abstract-short" style="display: inline;"> Generating molecular graphs is crucial in drug design and discovery but remains challenging due to the complex interdependencies between nodes and edges. While diffusion models have demonstrated their potentiality in molecular graph design, they often suffer from unstable training and inefficient sampling. To enhance generation performance and training stability, we propose GGFlow, a discrete flow&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05676v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05676v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05676v1-abstract-full" style="display: none;"> Generating molecular graphs is crucial in drug design and discovery but remains challenging due to the complex interdependencies between nodes and edges. While diffusion models have demonstrated their potentiality in molecular graph design, they often suffer from unstable training and inefficient sampling. 
To enhance generation performance and training stability, we propose GGFlow, a discrete flow matching generative model incorporating optimal transport for molecular graphs; it further incorporates an edge-augmented graph transformer to enable direct communication among chemical bonds. Additionally, GGFlow introduces a novel goal-guided generation framework to control the generative trajectory of our model, aiming to design novel molecular structures with the desired properties. GGFlow demonstrates superior performance on both unconditional and conditional molecule generation tasks, outperforming existing baselines and underscoring its effectiveness and potential for wider application. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05676v1-abstract-full').style.display = 'none'; document.getElementById('2411.05676v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05197">arXiv:2411.05197</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05197">pdf</a>, <a href="https://arxiv.org/format/2411.05197">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Hardware and Software Platform Inference </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Cheng Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Foerster%2C+H">Hanna Foerster</a>, <a href="/search/cs?searchtype=author&amp;query=Mullins%2C+R+D">Robert D. Mullins</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yiren Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Shumailov%2C+I">Ilia Shumailov</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05197v1-abstract-short" style="display: inline;"> It is now a common business practice to buy access to large language model (LLM) inference rather than self-host, because of significant upfront hardware infrastructure and energy costs. However, as a buyer, there is no mechanism to verify the authenticity of the advertised service including the serving hardware platform, e.g. that it is actually being served using an NVIDIA H100. Furthermore, the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05197v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05197v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05197v1-abstract-full" style="display: none;"> It is now a common business practice to buy access to large language model (LLM) inference rather than self-host, because of significant upfront hardware infrastructure and energy costs. However, as a buyer, there is no mechanism to verify the authenticity of the advertised service including the serving hardware platform, e.g. that it is actually being served using an NVIDIA H100.
Furthermore, there are reports suggesting that model providers may deliver models that differ slightly from the advertised ones, often to make them run on less expensive hardware. That way, a client pays a premium for access to a capable model on more expensive hardware, yet ends up being served by a (potentially less capable) cheaper model on cheaper hardware. In this paper we introduce <strong><em>hardware and software platform inference (HSPI)</em></strong> -- a method for identifying the underlying GPU architecture and software stack of a (black-box) machine learning model solely based on its input-output behavior. Our method leverages the inherent differences of various GPU architectures and compilers to distinguish between different GPU types and software stacks. By analyzing the numerical patterns in the model&#39;s outputs, we propose a classification framework capable of accurately identifying the GPU used for model inference as well as the underlying software configuration. Our findings demonstrate the feasibility of inferring GPU type from black-box models. We evaluate HSPI against models served on different real hardware and find that in a white-box setting we can distinguish between different GPUs with between $83.9\%$ and $100\%$ accuracy. Even in a black-box setting we are able to achieve results that are up to three times higher than random guess accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05197v1-abstract-full').style.display = 'none'; document.getElementById('2411.05197v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024.
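<p class="is-size-7">The black-box setup this abstract describes can be pictured with a generic sketch: query the same model under several known serving configurations, collect its output vectors, and fit an ordinary classifier on them. The synthetic stand-in data and the logistic-regression classifier below are illustrative assumptions, not the features or classification framework actually used by HSPI.</p> <pre><code class="language-python">
# Generic sketch of fingerprinting a serving stack from numerical output patterns.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)

# Stand-in data: output vectors for the same prompts under 3 known configurations.
# The 0.25 * c offset is a synthetic proxy for stack-specific numerical quirks.
n_queries, n_logits, n_configs = 300, 64, 3
X = np.concatenate([rng.normal(0, 1, (n_queries, n_logits)) + 0.25 * c
                    for c in range(n_configs)])
y = np.repeat(np.arange(n_configs), n_queries)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
print("held-out accuracy:", clf.score(X_te, y_te))
</code></pre>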
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05036">arXiv:2411.05036</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05036">pdf</a>, <a href="https://arxiv.org/ps/2411.05036">ps</a>, <a href="https://arxiv.org/format/2411.05036">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> From Word Vectors to Multimodal Embeddings: Techniques, Applications, and Future Directions For Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Charles Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+B">Benji Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xintian Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Niu%2C+Q">Qian Niu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Junyu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+K">Keyu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Ming Li</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+P">Pohsun Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Bi%2C+Z">Ziqian Bi</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+M">Ming Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yichao Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Fei%2C+C">Cheng Fei</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+C+H">Caitlyn Heqi Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+L+K">Lawrence KQ Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+T">Tianyang Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05036v1-abstract-short" style="display: inline;"> Word embeddings and language models have transformed natural language processing (NLP) by facilitating the representation of linguistic elements in continuous vector spaces. This review visits foundational concepts such as the distributional hypothesis and contextual similarity, tracing the evolution from sparse representations like one-hot encoding to dense embeddings including Word2Vec, GloVe, a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05036v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05036v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05036v1-abstract-full" style="display: none;"> Word embeddings and language models have transformed natural language processing (NLP) by facilitating the representation of linguistic elements in continuous vector spaces. This review visits foundational concepts such as the distributional hypothesis and contextual similarity, tracing the evolution from sparse representations like one-hot encoding to dense embeddings including Word2Vec, GloVe, and fastText. We examine both static and contextualized embeddings, underscoring advancements in models such as ELMo, BERT, and GPT and their adaptations for cross-lingual and personalized applications. 
The discussion extends to sentence and document embeddings, covering aggregation methods and generative topic models, along with the application of embeddings in multimodal domains, including vision, robotics, and cognitive science. Advanced topics such as model compression, interpretability, numerical encoding, and bias mitigation are analyzed, addressing both technical challenges and ethical implications. Additionally, we identify future research directions, emphasizing the need for scalable training techniques, enhanced interpretability, and robust grounding in non-textual modalities. By synthesizing current methodologies and emerging trends, this survey offers researchers and practitioners an in-depth resource to push the boundaries of embedding-based language models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05036v1-abstract-full').style.display = 'none'; document.getElementById('2411.05036v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04905">arXiv:2411.04905</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04905">pdf</a>, <a href="https://arxiv.org/format/2411.04905">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> </div> </div> <p class="title is-5 mathjax"> OpenCoder: The Open Cookbook for Top-Tier Code Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+S">Siming Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+T">Tianhao Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J+K">J. K. Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Hao%2C+J">Jiaran Hao</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+L">Liuyihan Song</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yang Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">J. Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J+H">J. H. 
Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chenchen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chai%2C+L">Linzheng Chai</a>, <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+R">Ruifeng Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhaoxiang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+J">Jie Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Q">Qian Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+G">Ge Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zili Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Qi%2C+Y">Yuan Qi</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yinghui Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Chu%2C+W">Wei Chu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04905v2-abstract-short" style="display: inline;"> Large language models (LLMs) for code have become indispensable in various domains, including code generation, reasoning tasks and agent systems. While open-access code LLMs are increasingly approaching the performance levels of proprietary models, high-quality code LLMs suitable for rigorous scientific investigation, particularly those with reproducible data processing pipelines and transparent t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04905v2-abstract-full').style.display = 'inline'; document.getElementById('2411.04905v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04905v2-abstract-full" style="display: none;"> Large language models (LLMs) for code have become indispensable in various domains, including code generation, reasoning tasks and agent systems. While open-access code LLMs are increasingly approaching the performance levels of proprietary models, high-quality code LLMs suitable for rigorous scientific investigation, particularly those with reproducible data processing pipelines and transparent training protocols, remain limited. The scarcity is due to various challenges, including resource constraints, ethical considerations, and the competitive advantages of keeping models advanced. To address the gap, we introduce OpenCoder, a top-tier code LLM that not only achieves performance comparable to leading models but also serves as an &#34;open cookbook&#34; for the research community. Unlike most prior efforts, we release not only model weights and inference code, but also the reproducible training data, complete data processing pipeline, rigorous experimental ablation results, and detailed training protocols for open scientific research. Through this comprehensive release, we identify the key ingredients for building a top-tier code LLM: (1) code optimized heuristic rules for data cleaning and methods for data deduplication, (2) recall of text corpus related to code and (3) high-quality synthetic data in both annealing and supervised fine-tuning stages. By offering this level of openness, we aim to broaden access to all aspects of a top-tier code LLM, with OpenCoder serving as both a powerful model and an open foundation to accelerate research, and enable reproducible advancements in code AI. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04905v2-abstract-full').style.display = 'none'; document.getElementById('2411.04905v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04558">arXiv:2411.04558</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04558">pdf</a>, <a href="https://arxiv.org/format/2411.04558">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Experimental Secure Multiparty Computation from Quantum Oblivious Transfer with Bit Commitment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+K">Kai-Yi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+A">An-Jing Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Tu%2C+K">Kun Tu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Ming-Han Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Qi%2C+W">Wei Qi</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Ya-Dong Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+Y">Yu Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04558v1-abstract-short" style="display: inline;"> Secure multiparty computation enables collaborative computations across multiple users while preserving individual privacy, which has a wide range of applications in finance, machine learning and healthcare. Secure multiparty computation can be realized using oblivious transfer as a primitive function. In this paper, we present an experimental implementation of a quantum-secure quantum oblivious t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04558v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04558v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04558v1-abstract-full" style="display: none;"> Secure multiparty computation enables collaborative computations across multiple users while preserving individual privacy, which has a wide range of applications in finance, machine learning and healthcare. Secure multiparty computation can be realized using oblivious transfer as a primitive function. In this paper, we present an experimental implementation of a quantum-secure quantum oblivious transfer (QOT) protocol using an adapted quantum key distribution system combined with a bit commitment scheme, surpassing previous approaches only secure in the noisy storage model. 
We demonstrate the first practical application of the QOT protocol by solving the private set intersection, a prime example of secure multiparty computation, where two parties aim to find common elements in their datasets without revealing any other information. In our experiments, two banks can identify common suspicious accounts without disclosing any other data. This not only proves the experimental functionality of QOT, but also showcases its real-world commercial applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04558v1-abstract-full').style.display = 'none'; document.getElementById('2411.04558v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04353">arXiv:2411.04353</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04353">pdf</a>, <a href="https://arxiv.org/format/2411.04353">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Other Condensed Matter">cond-mat.other</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> On the hardness of learning ground state entanglement of geometrically local Hamiltonians </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bouland%2C+A">Adam Bouland</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chenyi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Z">Zixin Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04353v1-abstract-short" style="display: inline;"> Characterizing the entanglement structure of ground states of local Hamiltonians is a fundamental problem in quantum information. In this work we study the computational complexity of this problem, given the Hamiltonian as input. Our main result is to show that it is cryptographically hard to determine if the ground state of a geometrically local, polynomially gapped Hamiltonian on qudits (&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04353v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04353v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04353v1-abstract-full" style="display: none;"> Characterizing the entanglement structure of ground states of local Hamiltonians is a fundamental problem in quantum information. In this work we study the computational complexity of this problem, given the Hamiltonian as input. Our main result is to show that it is cryptographically hard to determine if the ground state of a geometrically local, polynomially gapped Hamiltonian on qudits ($d=O(1)$) has near-area law vs near-volume law entanglement. This improves prior work of Bouland et al.
(arXiv:2311.12017) showing this for non-geometrically local Hamiltonians. In particular we show this problem is roughly factoring-hard in 1D, and LWE-hard in 2D. Our proof works by constructing a novel form of public-key pseudo-entanglement which is highly space-efficient, and combining this with a modification of Gottesman and Irani&#39;s quantum Turing machine to Hamiltonian construction. Our work suggests that the problem of learning so-called &#34;gapless&#34; quantum phases of matter might be intractable. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04353v1-abstract-full').style.display = 'none'; document.getElementById('2411.04353v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">47 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04205">arXiv:2411.04205</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04205">pdf</a>, <a href="https://arxiv.org/format/2411.04205">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> Scalable DP-SGD: Shuffling vs. Poisson Subsampling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chua%2C+L">Lynn Chua</a>, <a href="/search/cs?searchtype=author&amp;query=Ghazi%2C+B">Badih Ghazi</a>, <a href="/search/cs?searchtype=author&amp;query=Kamath%2C+P">Pritish Kamath</a>, <a href="/search/cs?searchtype=author&amp;query=Kumar%2C+R">Ravi Kumar</a>, <a href="/search/cs?searchtype=author&amp;query=Manurangsi%2C+P">Pasin Manurangsi</a>, <a href="/search/cs?searchtype=author&amp;query=Sinha%2C+A">Amer Sinha</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chiyuan Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04205v1-abstract-short" style="display: inline;"> We provide new lower bounds on the privacy guarantee of the multi-epoch Adaptive Batch Linear Queries (ABLQ) mechanism with shuffled batch sampling, demonstrating substantial gaps when compared to Poisson subsampling; prior analysis was limited to a single epoch. 
Since the privacy analysis of Differentially Private Stochastic Gradient Descent (DP-SGD) is obtained by analyzing the ABLQ mechanism, t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04205v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04205v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04205v1-abstract-full" style="display: none;"> We provide new lower bounds on the privacy guarantee of the multi-epoch Adaptive Batch Linear Queries (ABLQ) mechanism with shuffled batch sampling, demonstrating substantial gaps when compared to Poisson subsampling; prior analysis was limited to a single epoch. Since the privacy analysis of Differentially Private Stochastic Gradient Descent (DP-SGD) is obtained by analyzing the ABLQ mechanism, this brings into serious question the common practice of implementing shuffling-based DP-SGD, but reporting privacy parameters as if Poisson subsampling was used. To understand the impact of this gap on the utility of trained machine learning models, we introduce a practical approach to implement Poisson subsampling at scale using massively parallel computation, and efficiently train models with the same. We compare the utility of models trained with Poisson-subsampling-based DP-SGD, and the optimistic estimates of utility when using shuffling, via our new lower bounds on the privacy guarantee of ABLQ with shuffling. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04205v1-abstract-full').style.display = 'none'; document.getElementById('2411.04205v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
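<p class="is-size-7">The sampling distinction this abstract turns on, shuffling-based batching versus Poisson subsampling, is easy to state in code. The sketch below is a generic illustration of the two schemes only (sizes and names are arbitrary); it is not the paper's massively parallel implementation or its privacy analysis.</p> <pre><code class="language-python">
# Two ways of forming minibatches over n examples with expected size batch_size.
import numpy as np

rng = np.random.default_rng(0)
n, batch_size = 10_000, 100
q = batch_size / n                    # per-example sampling probability for Poisson

# Shuffling: permute once per epoch, then take fixed-size contiguous slices.
perm = rng.permutation(n)
shuffled_batches = [perm[i:i + batch_size] for i in range(0, n, batch_size)]

# Poisson subsampling: every example joins each batch independently with prob q,
# so batch sizes are random and an example may appear in several batches per epoch.
poisson_batches = [np.flatnonzero(q > rng.random(n)) for _ in range(n // batch_size)]

print("shuffled batch sizes:", sorted({len(b) for b in shuffled_batches}))
print("first five Poisson batch sizes:", [len(b) for b in poisson_batches[:5]])
</code></pre>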
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear at NeurIPS 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03349">arXiv:2411.03349</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03349">pdf</a>, <a href="https://arxiv.org/format/2411.03349">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> RuAG: Learned-rule-augmented Generation for Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yudi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+P">Pei Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Lu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chaoyun Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+M">Meng Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+Y">Yali Du</a>, <a href="/search/cs?searchtype=author&amp;query=Puzyrev%2C+Y">Yevgeniy Puzyrev</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+R">Randolph Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Qin%2C+S">Si Qin</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Q">Qingwei Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Pechenizkiy%2C+M">Mykola Pechenizkiy</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Dongmei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Rajmohan%2C+S">Saravan Rajmohan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qi Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03349v1-abstract-short" style="display: inline;"> In-context learning (ICL) and Retrieval-Augmented Generation (RAG) have gained attention for their ability to enhance LLMs&#39; reasoning by incorporating external knowledge but suffer from limited contextual window size, leading to insufficient information injection. To this end, we propose a novel framework, RuAG, to automatically distill large volumes of offline data into interpretable first-order&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03349v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03349v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03349v1-abstract-full" style="display: none;"> In-context learning (ICL) and Retrieval-Augmented Generation (RAG) have gained attention for their ability to enhance LLMs&#39; reasoning by incorporating external knowledge but suffer from limited contextual window size, leading to insufficient information injection. 
To this end, we propose a novel framework, RuAG, to automatically distill large volumes of offline data into interpretable first-order logic rules, which are injected into LLMs to boost their reasoning capabilities. Our method begins by formulating the search process relying on LLMs&#39; commonsense, where LLMs automatically define head and body predicates. Then, RuAG applies Monte Carlo Tree Search (MCTS) to navigate the combinatorial search space and efficiently discover logic rules from data. The resulting logic rules are translated into natural language, allowing targeted knowledge injection and seamless integration into LLM prompts for LLMs&#39; downstream task reasoning. We evaluate our framework on public and private industrial tasks, including natural language processing, time-series, decision-making, and industrial tasks, demonstrating its effectiveness in enhancing LLMs&#39; capability over diverse tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03349v1-abstract-full').style.display = 'none'; document.getElementById('2411.03349v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02840">arXiv:2411.02840</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02840">pdf</a>, <a href="https://arxiv.org/format/2411.02840">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Test-Time Dynamic Image Fusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cao%2C+B">Bing Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+Y">Yinan Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+Y">Yi Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Changqing Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Q">Qinghua Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02840v1-abstract-short" style="display: inline;"> The inherent challenge of image fusion lies in capturing the correlation of multi-source images and comprehensively integrating effective information from different sources. Most existing techniques fail to perform dynamic image fusion while notably lacking theoretical guarantees, leading to potential deployment risks in this field. Is it possible to conduct dynamic image fusion with a clear theor&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02840v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02840v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02840v1-abstract-full" style="display: none;"> The inherent challenge of image fusion lies in capturing the correlation of multi-source images and comprehensively integrating effective information from different sources.

arXiv:2411.02840 (https://arxiv.org/abs/2411.02840) [cs.CV]
Test-Time Dynamic Image Fusion
Authors: Bing Cao, Yinan Xia, Yi Ding, Changqing Zhang, Qinghua Hu
Abstract: The inherent challenge of image fusion lies in capturing the correlation of multi-source images and comprehensively integrating effective information from different sources. Most existing techniques fail to perform dynamic image fusion and notably lack theoretical guarantees, leading to potential deployment risks. Is it possible to conduct dynamic image fusion with a clear theoretical justification? In this paper, we give our solution from a generalization perspective. We reveal the generalized form of image fusion and derive a new test-time dynamic image fusion paradigm that provably reduces the upper bound of the generalization error. Specifically, we decompose the fused image into multiple components corresponding to its source data. The decomposed components represent the effective information from each source, so the gap between them reflects the Relative Dominability (RD) of the uni-source data in constructing the fused image. Theoretically, we prove that the key to reducing generalization error hinges on the negative correlation between the RD-based fusion weight and the uni-source reconstruction loss. Intuitively, RD dynamically highlights the dominant regions of each source and can be naturally converted to the corresponding fusion weight, achieving robust results. Extensive experiments and in-depth analysis on multiple benchmarks confirm our findings and the superiority of the approach. Our code is available at https://github.com/Yinan-Xia/TTD.
Submitted 5 November, 2024; originally announced November 2024.
Comments: Accepted by NeurIPS 2024
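
The negative correlation between fusion weight and reconstruction loss described above can be illustrated with a small sketch: per-pixel source weights that fall as each source's reconstruction loss rises, used as a rough stand-in for the paper's Relative Dominability (the actual decomposition and theory are not reproduced here).

# Hedged sketch of a test-time dynamic fusion rule: per-pixel source weights
# that decrease with each source's reconstruction loss. Losses and the
# temperature are illustrative assumptions, not the paper's RD definition.
import numpy as np

def dynamic_fusion(sources, recon_losses, temperature=1.0):
    # sources: list of HxW arrays; recon_losses: list of HxW per-pixel losses.
    losses = np.stack(recon_losses)                     # (S, H, W)
    weights = np.exp(-losses / temperature)
    weights /= weights.sum(axis=0, keepdims=True)       # softmax over sources, per pixel
    return np.sum(weights * np.stack(sources), axis=0)  # weighted per-pixel fusion

ir, vis = np.random.rand(4, 4), np.random.rand(4, 4)    # toy infrared / visible images
toy_losses = [np.random.rand(4, 4), np.random.rand(4, 4)]
fused = dynamic_fusion([ir, vis], toy_losses)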

arXiv:2411.02397 (https://arxiv.org/abs/2411.02397) [cs.CV]
Adaptive Caching for Faster Video Generation with Diffusion Transformers
Authors: Kumara Kahatapitiya, Haozhe Liu, Sen He, Ding Liu, Menglin Jia, Chenyang Zhang, Michael S. Ryoo, Tian Xie
Abstract: Generating temporally consistent high-fidelity videos can be computationally expensive, especially over longer temporal spans. More recent Diffusion Transformers (DiTs), despite making significant headway in this context, have only heightened such challenges as they rely on larger models and heavier attention mechanisms, resulting in slower inference speeds. In this paper, we introduce a training-free method to accelerate video DiTs, termed Adaptive Caching (AdaCache), which is motivated by the fact that "not all videos are created equal": some videos require fewer denoising steps than others to attain a reasonable quality. Building on this, we not only cache computations through the diffusion process but also devise a caching schedule tailored to each video generation, maximizing the quality-latency trade-off. We further introduce a Motion Regularization (MoReg) scheme to utilize video information within AdaCache, controlling the compute allocation based on motion content. Altogether, our plug-and-play contributions grant significant inference speedups (e.g., up to 4.7x on Open-Sora 720p 2s video generation) without sacrificing generation quality, across multiple video DiT baselines.
Submitted 7 November, 2024; v1 submitted 4 November, 2024; originally announced November 2024.
Comments: Project page is available at https://adacache-dit.github.io
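
A minimal sketch of the caching idea in the AdaCache abstract: reuse a transformer block's previous output when its input has barely changed between denoising steps. The change metric, fixed threshold, and block interface are illustrative assumptions rather than the paper's actual schedule or motion regularizer.

# Hedged sketch of step-wise caching for a diffusion transformer block:
# reuse the previous step's output when the block input changes little.
import torch

class CachedBlock(torch.nn.Module):
    def __init__(self, block, threshold=0.05):
        super().__init__()
        self.block = block
        self.threshold = threshold
        self._last_in, self._last_out = None, None

    def forward(self, x):
        if self._last_in is not None:
            change = (x - self._last_in).abs().mean() / (self._last_in.abs().mean() + 1e-8)
            if change < self.threshold:            # features barely moved: reuse the cache
                return self._last_out
        out = self.block(x)                        # otherwise recompute and refresh the cache
        self._last_in, self._last_out = x.detach(), out.detach()
        return out

block = CachedBlock(torch.nn.Linear(16, 16))
x = torch.randn(2, 16)
for step in range(4):                              # successive denoising steps (toy loop)
    y = block(x + 0.001 * torch.randn_like(x))     # nearly unchanged input hits the cache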

arXiv:2411.02336 (https://arxiv.org/abs/2411.02336) [cs.CV]
MVPaint: Synchronized Multi-View Diffusion for Painting Anything 3D
Authors: Wei Cheng, Juncheng Mu, Xianfang Zeng, Xin Chen, Anqi Pang, Chi Zhang, Zhibin Wang, Bin Fu, Gang Yu, Ziwei Liu, Liang Pan
Abstract: Texturing is a crucial step in the 3D asset production workflow, enhancing the visual appeal and diversity of 3D assets. Despite recent advancements in Text-to-Texture (T2T) generation, existing methods often yield subpar results, primarily due to local discontinuities, inconsistencies across multiple views, and heavy dependence on UV unwrapping outcomes. To tackle these challenges, we propose a novel generation-refinement 3D texturing framework called MVPaint, which can generate high-resolution, seamless textures while emphasizing multi-view consistency. MVPaint mainly consists of three key modules. 1) Synchronized Multi-view Generation (SMG): given a 3D mesh model, MVPaint first generates multi-view images simultaneously with an SMG model, which produces coarse texturing results with unpainted parts due to missing observations. 2) Spatial-aware 3D Inpainting (S3I): to ensure complete 3D texturing, we introduce the S3I method, specifically designed to texture previously unobserved areas. 3) UV Refinement (UVR): MVPaint employs a UVR module to improve texture quality in UV space, first performing UV-space super-resolution and then a spatial-aware seam-smoothing algorithm that revises spatial texturing discontinuities caused by UV unwrapping. Moreover, we establish two T2T evaluation benchmarks, the Objaverse T2T benchmark and the GSO T2T benchmark, based on selected high-quality 3D meshes from the Objaverse dataset and the entire GSO dataset, respectively. Extensive experimental results demonstrate that MVPaint surpasses existing state-of-the-art methods. Notably, MVPaint generates high-fidelity textures with minimal Janus issues and highly enhanced cross-view consistency.
Submitted 4 November, 2024; originally announced November 2024.
Comments: Project Page: https://mvpaint.github.io

arXiv:2411.02265 (https://arxiv.org/abs/2411.02265) [cs.CL, cs.AI]
Hunyuan-Large: An Open-Source MoE Model with 52 Billion Activated Parameters by Tencent
Authors: Xingwu Sun, Yanfeng Chen, Yiqing Huang, Ruobing Xie, Jiaqi Zhu, Kai Zhang, Shuaipeng Li, Zhen Yang, Jonny Han, Xiaobo Shu, Jiahao Bu, Zhongzhi Chen, Xuemeng Huang, Fengzong Lian, Saiyong Yang, Jianfeng Yan, Yuyuan Zeng, Xiaoqin Ren, Chao Yu, Lulu Wu, Yue Mao, Jun Xia, Tao Yang, Suncong Zheng, Kan Wu, et al. (83 additional authors not shown)
Abstract: In this paper, we introduce Hunyuan-Large, currently the largest open-source Transformer-based mixture-of-experts model, with a total of 389 billion parameters and 52 billion activated parameters, capable of handling up to 256K tokens. We conduct a thorough evaluation of Hunyuan-Large's performance across various benchmarks, including language understanding and generation, logical reasoning, mathematical problem-solving, coding, long-context, and aggregated tasks, where it outperforms LLama3.1-70B and exhibits performance comparable to the significantly larger LLama3.1-405B model. Key practices of Hunyuan-Large include large-scale synthetic data orders of magnitude larger than in previous literature, a mixed expert routing strategy, a key-value cache compression technique, and an expert-specific learning rate strategy. Additionally, we investigate the scaling laws and learning rate schedules of mixture-of-experts models, providing valuable insights and guidance for future model development and optimization. The code and checkpoints of Hunyuan-Large are released to facilitate future innovations and applications. Codes: https://github.com/Tencent/Hunyuan-Large Models: https://huggingface.co/tencent/Tencent-Hunyuan-Large
Submitted 6 November, 2024; v1 submitted 4 November, 2024; originally announced November 2024.
Comments: 17 pages, 4 Figures

arXiv:2411.01766 (https://arxiv.org/abs/2411.01766) [cs.IT]
Lyapunov-guided Multi-Agent Reinforcement Learning for Delay-Sensitive Wireless Scheduling
Authors: Cheng Zhang, Lan Wei, Ji Fan, Zening Liu, Yongming Huang
Abstract: In this paper, a two-stage intelligent scheduler is proposed to minimize packet-level delay jitter while guaranteeing a delay bound. First, Lyapunov techniques are employed to transform the delay-violation constraint into a sequence of slot-level queue stability problems. Second, a hierarchical scheme is proposed to solve the resource allocation between multiple base stations and users, where multi-agent reinforcement learning (MARL) assigns each user's priority and number of scheduled packets, while the underlying scheduler allocates the resources. Our proposed scheme achieves lower delay jitter and a lower delay violation rate than the Round-Robin Earliest Deadline First algorithm and MARL with a delay violation penalty.
Submitted 4 November, 2024; v1 submitted 3 November, 2024; originally announced November 2024.
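
To make the Lyapunov step in the scheduling abstract above concrete, here is a small sketch of a virtual-queue update and a drift-plus-penalty score of the kind such schedulers typically optimize per slot. The queue definition, per-slot costs, and constants (epsilon, V) are illustrative assumptions, not the paper's formulation.

# Hedged sketch: a virtual queue turns a long-run delay-violation constraint
# into slot-level queue stability, and each slot trades off the jitter penalty
# against the queue-weighted constraint term (drift-plus-penalty).
def virtual_queue_update(Z, violation, epsilon):
    # Z grows when the delay bound is violated and drains by epsilon otherwise.
    return max(Z + violation - epsilon, 0.0)

def drift_plus_penalty(Z, jitter_penalty, violation, V=10.0):
    # Larger V favors the jitter objective; larger Z pushes toward feasibility.
    return V * jitter_penalty + Z * violation

Z = 0.0
for slot in range(5):
    violated = 1.0 if slot % 3 == 0 else 0.0        # toy per-slot delay-bound outcome
    score = drift_plus_penalty(Z, jitter_penalty=0.2, violation=violated)
    Z = virtual_queue_update(Z, violated, epsilon=0.1)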

arXiv:2411.01540 (https://arxiv.org/abs/2411.01540) [cs.IR, cs.LG] doi:10.1145/3627673.3679682
Efficient and Robust Regularized Federated Recommendation
Authors: Langming Liu, Wanyu Wang, Xiangyu Zhao, Zijian Zhang, Chunxu Zhang, Shanru Lin, Yiqi Wang, Lixin Zou, Zitao Liu, Xuetao Wei, Hongzhi Yin, Qing Li
Abstract: Recommender systems play a pivotal role across practical scenarios, showcasing remarkable capabilities in user preference modeling. However, the centralized learning paradigm predominantly used raises serious privacy concerns. The federated recommender system (FedRS) addresses this by updating models on clients, while a central server orchestrates training without accessing private data. Existing FedRS approaches, however, face unresolved challenges, including non-convex optimization, vulnerability, potential privacy leakage risk, and communication inefficiency. This paper addresses these challenges by reformulating the federated recommendation problem as a convex optimization problem, ensuring convergence to the global optimum. Based on this, we devise a novel method, RFRec, to tackle this optimization problem efficiently. In addition, we propose RFRecF, a highly efficient version that incorporates non-uniform stochastic gradient descent to improve communication efficiency. In user preference modeling, both methods learn local and global models, collaboratively learning users' common and personalized interests under the federated learning setting. Moreover, both methods significantly enhance communication efficiency, robustness, and privacy protection, with theoretical support. Comprehensive evaluations on four benchmark datasets demonstrate RFRec and RFRecF's superior performance compared to diverse baselines.
Submitted 3 November, 2024; originally announced November 2024.
Comments: CIKM 2024
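
The local/global modeling described above follows the general shape of regularized federated optimization. As a rough, generic sketch (not the RFRec objective itself), each client can descend its local loss plus a quadratic term tying its model to the server's global model, which the server then averages; the loss, lambda, and learning rate below are illustrative assumptions.

# Hedged sketch of a proximal-style regularized federated update.
import numpy as np

def local_update(local_w, global_w, grad_fn, lam=0.1, lr=0.05, steps=10):
    w = local_w.copy()
    for _ in range(steps):
        # gradient of: local_loss(w) + (lam / 2) * ||w - global_w||^2
        w -= lr * (grad_fn(w) + lam * (w - global_w))
    return w

def server_aggregate(client_models):
    return np.mean(client_models, axis=0)          # simple average of client models

global_w = np.zeros(8)
clients = [np.random.randn(8) * 0.1 for _ in range(4)]
toy_grad = lambda w: w - 1.0                        # gradient of a toy convex local loss
clients = [local_update(w, global_w, toy_grad) for w in clients]
global_w = server_aggregate(clients)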

arXiv:2411.01537 (https://arxiv.org/abs/2411.01537) [cs.IR] doi:10.1145/3539618.3591717
LinRec: Linear Attention Mechanism for Long-term Sequential Recommender Systems
Authors: Langming Liu, Xiangyu Zhao, Chi Zhang, Jingtong Gao, Wanyu Wang, Wenqi Fan, Yiqi Wang, Ming He, Zitao Liu, Qing Li
Abstract: Transformer models have achieved remarkable success in sequential recommender systems (SRSs). However, computing the attention matrix in traditional dot-product attention mechanisms results in a quadratic complexity with sequence lengths, leading to high computational costs for long-term sequential recommendation. Motivated by the above observation, we propose a novel L2-Normalized Linear Attention for the Transformer-based Sequential Recommender Systems (LinRec), which theoretically improves efficiency while preserving the learning capabilities of the traditional dot-product attention. Specifically, by thoroughly examining the equivalence conditions of efficient attention mechanisms, we show that LinRec possesses linear complexity while preserving the property of attention mechanisms. In addition, we reveal its latent efficiency properties by interpreting the proposed LinRec mechanism through a statistical lens. Extensive experiments are conducted based on two public benchmark datasets, demonstrating that the combination of LinRec and Transformer models achieves comparable or even superior performance than state-of-the-art Transformer-based SRS models while significantly improving time and memory efficiency.
Submitted 3 November, 2024; originally announced November 2024.
Comments: SIGIR 2023
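
The quadratic-versus-linear complexity point in the LinRec abstract is easy to see in code: if the key-value product is computed first, attention cost grows linearly with sequence length. The sketch below uses row-wise L2 normalization of queries and keys as one plausible reading of "L2-normalized linear attention"; the exact LinRec feature map and masking are not reproduced here.

# Hedged sketch: linear attention computed right-to-left, O(N d^2) rather than
# the O(N^2 d) of dot-product softmax attention.
import torch
import torch.nn.functional as F

def linear_attention(Q, K, V):
    Qn = F.normalize(Q, p=2, dim=-1)                 # (N, d) row-wise L2 normalization
    Kn = F.normalize(K, p=2, dim=-1)                 # (N, d)
    context = Kn.transpose(-2, -1) @ V               # (d, d): aggregate keys with values first
    return Qn @ context                              # (N, d): cost linear in sequence length N

N, d = 1024, 64
Q, K, V = (torch.randn(N, d) for _ in range(3))
out = linear_attention(Q, K, V)                      # same output shape as standard attention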

arXiv:2411.01178 (https://arxiv.org/abs/2411.01178) [cs.IR]
LLM4PR: Improving Post-Ranking in Search Engine with Large Language Models
Authors: Yang Yan, Yihao Wang, Chi Zhang, Wenyuan Hou, Kang Pan, Xingkai Ren, Zelun Wu, Zhixin Zhai, Enyun Yu, Wenwu Ou, Yang Song
Abstract: Alongside the rapid development of Large Language Models (LLMs), there has been a notable increase in efforts to integrate LLM techniques into information retrieval (IR) and search engines (SE). Recently, an additional post-ranking stage has been suggested in SE to enhance user satisfaction in practical applications. Nevertheless, research dedicated to enhancing the post-ranking stage through LLMs remains largely unexplored. In this study, we introduce a novel paradigm named Large Language Models for Post-Ranking in search engines (LLM4PR), which leverages the capabilities of LLMs to accomplish the post-ranking task in SE. Concretely, a Query-Instructed Adapter (QIA) module is designed to derive user/item representation vectors by incorporating their heterogeneous features. A feature adaptation step is further introduced to align the semantics of the user/item representations with the LLM. Finally, LLM4PR integrates a learning-to-post-rank step, leveraging both a main task and an auxiliary task to fine-tune the model for the post-ranking task. Experimental studies demonstrate that the proposed framework leads to significant improvements and exhibits state-of-the-art performance compared with other alternatives.
Submitted 2 November, 2024; originally announced November 2024.

arXiv:2411.00881 (https://arxiv.org/abs/2411.00881) [cs.CV]
Technical Report for SoccerNet Challenge 2022 -- Replay Grounding Task
Authors: Shimin Chen, Wei Li, Jiaming Chu, Chen Chen, Chen Zhang, Yandong Guo
Abstract: In order to make full use of video information, we transform the replay grounding problem into a video action localization problem. We apply Faster-TAD, a unified network we proposed for temporal action detection, to obtain the replay grounding results. Finally, by observing the data distribution of the training data, we refine the output of the model to get the final submission.
Submitted 31 October, 2024; originally announced November 2024.

arXiv:2411.00850 (https://arxiv.org/abs/2411.00850) [cs.LG, cs.AI, cs.CL]
GWQ: Gradient-Aware Weight Quantization for Large Language Models
Authors: Yihua Shao, Siyu Liang, Xiaolin Lin, Zijian Ling, Zixian Zhu, Minxi Yan, Haiyang Liu, Siyu Chen, Ziyang Yan, Yilan Meng, Chenyu Zhang, Haotong Qin, Michele Magno, Yang Yang, Zhen Lei, Yan Wang, Jingcai Guo, Ling Shao, Hao Tang
Abstract: Large language models (LLMs) show impressive performance in solving complex language tasks. However, their large number of parameters presents significant challenges for deploying and applying these models on edge devices. Compressing large language models to low bit-widths can enable them to run on resource-constrained devices, but often leads to performance degradation. To address this problem, we propose gradient-aware weight quantization (GWQ), the first low-bit weight quantization approach that leverages gradients to localize outliers, requiring only a minimal amount of calibration data for outlier detection. GWQ preferentially retains the weights corresponding to the top 1% of outliers at FP16 precision, while the remaining non-outlier weights are stored in a low-bit format. We find experimentally that localizing sensitive weights via gradients is more effective than localizing them via the Hessian matrix. Compared to current quantization methods, GWQ can be applied to multiple language models and achieves lower perplexity on the WikiText2 and C4 datasets. On zero-shot tasks, GWQ-quantized models achieve higher accuracy than models quantized by other methods. GWQ is also suitable for multimodal model quantization, and the quantized Qwen-VL family of models is more accurate than with other methods; on the zero-shot target detection dataset RefCOCO, GWQ outperforms the current state-of-the-art method SPQR. GWQ achieves a 1.2x inference speedup compared to the original model and effectively reduces inference memory.
Submitted 30 October, 2024; originally announced November 2024.
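
The outlier-preserving scheme described above can be illustrated with a short sketch: score each weight by a gradient-based sensitivity, keep the top ~1% at full precision, and round the rest to a low-bit grid. The sensitivity score and the simple uniform quantizer are illustrative stand-ins, not GWQ's actual procedure.

# Hedged sketch of gradient-aware mixed-precision quantization.
import torch

def gradient_aware_quantize(weight, grad, keep_ratio=0.01, bits=4):
    sensitivity = (grad * weight).abs().flatten()          # saliency from calibration gradients
    k = max(1, int(keep_ratio * sensitivity.numel()))
    outlier_idx = sensitivity.topk(k).indices              # most sensitive weights
    scale = weight.abs().max() / (2 ** (bits - 1) - 1)
    q = torch.clamp((weight / scale).round(), -(2 ** (bits - 1)), 2 ** (bits - 1) - 1)
    dequant = (q * scale).flatten()
    dequant[outlier_idx] = weight.flatten()[outlier_idx]   # keep outliers at full precision
    return dequant.view_as(weight)

w = torch.randn(64, 64)
g = torch.randn(64, 64)                                    # stand-in for calibration gradients
w_q = gradient_aware_quantize(w, g)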

arXiv:2411.00785 (https://arxiv.org/abs/2411.00785) [cs.RO, cs.AI]
IGOR: Image-GOal Representations are the Atomic Control Units for Foundation Models in Embodied AI
Authors: Xiaoyu Chen, Junliang Guo, Tianyu He, Chuheng Zhang, Pushi Zhang, Derek Cathera Yang, Li Zhao, Jiang Bian
Abstract: We introduce Image-GOal Representations (IGOR), aiming to learn a unified, semantically consistent action space across humans and various robots. Through this unified latent action space, IGOR enables knowledge transfer among large-scale robot and human activity data. We achieve this by compressing visual changes between an initial image and its goal state into latent actions. IGOR allows us to generate latent action labels for internet-scale video data. This unified latent action space enables the training of foundation policy and world models across a wide variety of tasks performed by both robots and humans. We demonstrate that: (1) IGOR learns a semantically consistent action space for both humans and robots, characterizing the various possible motions of objects and capturing physical interaction knowledge; (2) IGOR can "migrate" the movements of an object in one video to other videos, even across humans and robots, by jointly using the latent action model and the world model; (3) IGOR can learn to align latent actions with natural language through the foundation policy model, and can integrate latent actions with a low-level policy model to achieve effective robot control. We believe IGOR opens new possibilities for human-to-robot knowledge transfer and control.
Submitted 17 October, 2024; originally announced November 2024.

arXiv:2410.24095 (https://arxiv.org/abs/2410.24095) [eess.SP, cs.GT]
Network Games Induced Prior for Graph Topology Learning
Authors: Chenyue Zhang, Shangyuan Liu, Hoi-To Wai, Anthony Man-Cho So
Abstract: Learning the graph topology of a complex network is challenging due to limited data availability and imprecise data models. A common remedy in existing works is to incorporate priors such as sparsity or modularity, which highlight structural properties of the graph topology. We depart from these approaches to develop priors that are directly inspired by complex network dynamics. Focusing on social networks whose actions are modeled by equilibria of linear quadratic games, we postulate that social network topologies are optimized with respect to a social welfare function. Utilizing this prior knowledge, we propose a network-games-induced regularizer to assist graph learning. We then formulate the graph topology learning problem as a bilevel program and develop a two-timescale gradient algorithm to tackle it. We draw theoretical insights on the optimal graph structure of the bilevel program and show that they agree with the topology of several man-made networks. Empirically, we demonstrate that the proposed formulation gives rise to reliable estimates of graph topology.
Submitted 31 October, 2024; originally announced October 2024.
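
For intuition on how a game-theoretic prior can score a candidate topology, the sketch below uses the standard linear quadratic network game equilibrium condition (I - beta*A)x = b and penalizes candidate graphs whose implied equilibrium is far from the observed actions. The constants beta and b and this particular residual are illustrative assumptions, not the regularizer derived in the paper.

# Hedged sketch: an equilibrium-residual penalty as a game-induced prior.
import numpy as np

def game_prior_penalty(A, actions, b, beta=0.3):
    n = A.shape[0]
    residual = (np.eye(n) - beta * A) @ actions - b
    return float(residual @ residual)                  # squared equilibrium residual

A_true = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)
b = np.ones(3)
x_eq = np.linalg.solve(np.eye(3) - 0.3 * A_true, b)    # equilibrium actions under A_true
print(game_prior_penalty(A_true, x_eq, b))             # ~0: the true topology fits the prior
print(game_prior_penalty(np.zeros((3, 3)), x_eq, b))   # larger: an empty graph fits poorly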
