Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 1,104 results for author: <span class="mathjax">Zhu, H</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Zhu%2C+H">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Zhu, H"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Zhu%2C+H&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Zhu, H"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.18347">arXiv:2411.18347</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.18347">pdf</a>, <a href="https://arxiv.org/format/2411.18347">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> TransferFuzz: Fuzzing with Historical Trace for Verifying Propagated Vulnerability Code </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+S">Siyuan Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yuekang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zuxin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+C">Chaopeng Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yongpan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H">Hong Li</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yongle Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongsong Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.18347v1-abstract-short" style="display: inline;"> Code reuse in software development frequently facilitates the spread of vulnerabilities, making the scope of affected software in CVE reports imprecise. 
Traditional methods primarily focus on identifying reused vulnerability code within target software, yet they cannot verify if these vulnerabilities can be triggered in new software contexts. This limitation often results in false positives. In th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.18347v1-abstract-full').style.display = 'inline'; document.getElementById('2411.18347v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.18347v1-abstract-full" style="display: none;"> Code reuse in software development frequently facilitates the spread of vulnerabilities, making the scope of affected software in CVE reports imprecise. Traditional methods primarily focus on identifying reused vulnerability code within target software, yet they cannot verify if these vulnerabilities can be triggered in new software contexts. This limitation often results in false positives. In this paper, we introduce TransferFuzz, a novel vulnerability verification framework, to verify whether vulnerabilities propagated through code reuse can be triggered in new software. Innovatively, we collected runtime information during the execution or fuzzing of the basic binary (the vulnerable binary detailed in CVE reports). This process allowed us to extract historical traces, which proved instrumental in guiding the fuzzing process for the target binary (the new binary that reused the vulnerable function). TransferFuzz introduces a unique Key Bytes Guided Mutation strategy and a Nested Simulated Annealing algorithm, which transfers these historical traces to implement trace-guided fuzzing on the target binary, facilitating the accurate and efficient verification of the propagated vulnerability. Our evaluation, conducted on widely recognized datasets, shows that TransferFuzz can quickly validate vulnerabilities previously unverifiable with existing techniques. Its verification speed is 2.5 to 26.2 times faster than existing methods. Moreover, TransferFuzz has proven its effectiveness by expanding the impacted software scope for 15 vulnerabilities listed in CVE reports, increasing the number of affected binaries from 15 to 53. The datasets and source code used in this article are available at https://github.com/Siyuan-Li201/TransferFuzz. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.18347v1-abstract-full').style.display = 'none'; document.getElementById('2411.18347v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by 2025 IEEE/ACM 47th International Conference on Software Engineering (ICSE&#39;25)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.17917">arXiv:2411.17917</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.17917">pdf</a>, <a href="https://arxiv.org/format/2411.17917">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> DECODE: Domain-aware Continual Domain Expansion for Motion Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+B">Boqi Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Haojie Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H+X">Henry X. Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.17917v1-abstract-short" style="display: inline;"> Motion prediction is critical for autonomous vehicles to effectively navigate complex environments and accurately anticipate the behaviors of other traffic participants. As autonomous driving continues to evolve, the need to assimilate new and varied driving scenarios necessitates frequent model updates through retraining. To address these demands, we introduce DECODE, a novel continual learning f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17917v1-abstract-full').style.display = 'inline'; document.getElementById('2411.17917v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.17917v1-abstract-full" style="display: none;"> Motion prediction is critical for autonomous vehicles to effectively navigate complex environments and accurately anticipate the behaviors of other traffic participants. As autonomous driving continues to evolve, the need to assimilate new and varied driving scenarios necessitates frequent model updates through retraining. To address these demands, we introduce DECODE, a novel continual learning framework that begins with a pre-trained generalized model and incrementally develops specialized models for distinct domains. Unlike existing continual learning approaches that attempt to develop a unified model capable of generalizing across diverse scenarios, DECODE uniquely balances specialization with generalization, dynamically adjusting to real-time demands. The proposed framework leverages a hypernetwork to generate model parameters, significantly reducing storage requirements, and incorporates a normalizing flow mechanism for real-time model selection based on likelihood estimation. Furthermore, DECODE merges outputs from the most relevant specialized and generalized models using deep Bayesian uncertainty estimation techniques. This integration ensures optimal performance in familiar conditions while maintaining robustness in unfamiliar scenarios. 
Extensive evaluations confirm the effectiveness of the framework, achieving a notably low forgetting rate of 0.044 and an average minADE of 0.584 m, significantly surpassing traditional learning strategies and demonstrating adaptability across a wide range of driving conditions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17917v1-abstract-full').style.display = 'none'; document.getElementById('2411.17917v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work has been submitted to the IEEE for possible publication</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.9; I.5.1 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16002">arXiv:2411.16002</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16002">pdf</a>, <a href="https://arxiv.org/format/2411.16002">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Exploring Performance Contrasts in TableQA: Step-by-Step Reasoning Boosts Bigger Language Models, Limits Smaller Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+H">Haoyan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yixuan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+K">Keyue Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongjin Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuanxin Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16002v1-abstract-short" style="display: inline;"> This paper proposes a detailed prompting flow, termed Table-Logic, to investigate the performance contrasts between bigger and smaller language models (LMs) utilizing step-by-step reasoning methods in the TableQA task. The method processes tasks by sequentially identifying critical columns and rows given question and table with its structure, determining necessary aggregations, calculations, or co&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16002v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16002v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16002v1-abstract-full" style="display: none;"> This paper proposes a detailed prompting flow, termed Table-Logic, to investigate the performance contrasts between bigger and smaller language models (LMs) utilizing step-by-step reasoning methods in the TableQA task. 
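
A minimal sketch of the run-time model-selection step described above, assuming per-domain density models and a 2-D scene feature purely for illustration (the paper uses normalizing flows; fitted Gaussians stand in for them here, and all names and numbers are hypothetical):

import numpy as np

# Hypothetical stand-ins for DECODE's per-domain density models: one Gaussian
# per driving domain, fitted to that domain's scene features. In the paper a
# normalizing flow plays this role; the selection logic is the same.
domains = {
    "highway": {"mean": np.array([1.0, 0.0]), "cov": np.eye(2) * 0.5},
    "urban":   {"mean": np.array([-1.0, 2.0]), "cov": np.eye(2) * 0.8},
}

def log_likelihood(x, mean, cov):
    # Gaussian log-density, used here only as a placeholder for flow likelihood.
    d = x - mean
    inv = np.linalg.inv(cov)
    _, logdet = np.linalg.slogdet(cov)
    return -0.5 * (d @ inv @ d + logdet + len(x) * np.log(2 * np.pi))

def select_specialist(scene_feature):
    """Pick the specialized predictor whose domain density rates the scene highest."""
    scores = {name: log_likelihood(scene_feature, p["mean"], p["cov"])
              for name, p in domains.items()}
    return max(scores, key=scores.get), scores

feature = np.random.default_rng(0).normal(size=2)
best, scores = select_specialist(feature)
print(best, scores)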

3. arXiv:2411.16002 [pdf, other] - cs.CL (Computation and Language)
Exploring Performance Contrasts in TableQA: Step-by-Step Reasoning Boosts Bigger Language Models, Limits Smaller Language Models
Authors: Haoyan Yang, Yixuan Wang, Keyue Tong, Hongjin Zhu, Yuanxin Zhang
Abstract: This paper proposes a detailed prompting flow, termed Table-Logic, to investigate the performance contrasts between bigger and smaller language models (LMs) utilizing step-by-step reasoning methods in the TableQA task. The method processes tasks by sequentially identifying critical columns and rows given the question and the table with its structure, determining necessary aggregations, calculations, or comparisons, and finally inferring the results to generate a precise prediction. By deploying this method, we observe a 7.8% accuracy improvement in bigger LMs like Llama-3-70B compared to the vanilla setting on HybridQA, while smaller LMs like Llama-2-7B show an 11% performance decline. We empirically investigate the potential causes of performance contrasts by exploring the capabilities of bigger and smaller LMs from various dimensions in the TableQA task. Our findings highlight the limitations of the step-by-step reasoning method in small models and provide potential insights for making improvements.
Submitted 24 November, 2024; originally announced November 2024.
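
As a rough illustration of the step-by-step flow this abstract describes, a prompt-construction helper might look like the sketch below; the step wording, the function name, and the llm callable mentioned in the comment are placeholders, not the authors' actual prompts:

def table_logic_prompt(question: str, table_text: str) -> str:
    """Compose a step-by-step TableQA prompt in the spirit of the Table-Logic flow:
    locate relevant columns/rows, decide the operation, then infer the answer."""
    steps = [
        "1. Identify the table columns that are relevant to the question.",
        "2. Identify the rows that are relevant to the question.",
        "3. Decide which aggregation, calculation, or comparison is needed.",
        "4. Carry out that operation and state the final answer.",
    ]
    return (
        f"Table:\n{table_text}\n\n"
        f"Question: {question}\n\n"
        "Answer by reasoning through the following steps:\n" + "\n".join(steps)
    )

# usage with any chat-model callable, e.g. answer = llm(table_logic_prompt(q, t))
print(table_logic_prompt("Which player scored the most points?",
                         "player | points\nA | 10\nB | 12"))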

4. arXiv:2411.15604 [pdf, other] - cs.CV (Computer Vision and Pattern Recognition)
FATE: Full-head Gaussian Avatar with Textural Editing from Monocular Video
Authors: Jiawei Zhang, Zijian Wu, Zhiyang Liang, Yicheng Gong, Dongfang Hu, Yao Yao, Xun Cao, Hao Zhu
Abstract: Reconstructing high-fidelity, animatable 3D head avatars from effortlessly captured monocular videos is a pivotal yet formidable challenge. Although significant progress has been made in rendering performance and manipulation capabilities, notable challenges remain, including incomplete reconstruction and inefficient Gaussian representation. To address these challenges, we introduce FATE, a novel method for reconstructing an editable full-head avatar from a single monocular video. FATE integrates a sampling-based densification strategy to ensure optimal positional distribution of points, improving rendering efficiency. A neural baking technique is introduced to convert discrete Gaussian representations into continuous attribute maps, facilitating intuitive appearance editing. Furthermore, we propose a universal completion framework to recover non-frontal appearance, culminating in a 360°-renderable 3D head avatar. FATE outperforms previous approaches in both qualitative and quantitative evaluations, achieving state-of-the-art performance. To the best of our knowledge, FATE is the first animatable and 360° full-head monocular reconstruction method for a 3D head avatar. The code will be publicly released upon publication.
Submitted 23 November, 2024; originally announced November 2024.
Comments: project page: https://zjwfufu.github.io/FATE-page/

5. arXiv:2411.15576 [pdf, other] - eess.IV (Image and Video Processing), cs.CV (Computer Vision and Pattern Recognition)
MulModSeg: Enhancing Unpaired Multi-Modal Medical Image Segmentation with Modality-Conditioned Text Embedding and Alternating Training
Authors: Chengyin Li, Hui Zhu, Rafi Ibn Sultan, Hassan Bagher Ebadian, Prashant Khanduri, Chetty Indrin, Kundan Thind, Dongxiao Zhu
Abstract: In the diverse field of medical imaging, automatic segmentation has numerous applications and must handle a wide variety of input domains, such as different types of Computed Tomography (CT) scans and Magnetic Resonance (MR) images. This heterogeneity challenges automatic segmentation algorithms to maintain consistent performance across different modalities due to the requirement for spatially aligned and paired images. Typically, segmentation models are trained using a single modality, which limits their ability to generalize to other types of input data without employing transfer learning techniques. Additionally, leveraging complementary information from different modalities to enhance segmentation precision often necessitates substantial modifications to popular encoder-decoder designs, such as introducing multiple branched encoding or decoding paths for each modality. In this work, we propose a simple Multi-Modal Segmentation (MulModSeg) strategy to enhance medical image segmentation across multiple modalities, specifically CT and MR. It incorporates two key designs: a modality-conditioned text embedding framework via a frozen text encoder that adds modality awareness to existing segmentation frameworks without significant structural modifications or computational overhead, and an alternating training procedure that facilitates the integration of essential features from unpaired CT and MR inputs. Through extensive experiments with both Fully Convolutional Network and Transformer-based backbones, MulModSeg consistently outperforms previous methods in segmenting abdominal multi-organ and cardiac substructures for both CT and MR modalities. The code is available at https://github.com/ChengyinLee/MulModSeg_2024.
Submitted 23 November, 2024; originally announced November 2024.
Comments: Accepted by WACV-2025

6. arXiv:2411.15504 [pdf, other] - physics.med-ph (Medical Physics), cs.RO (Robotics)
Effects of Muscle Synergy during Overhead Work with a Passive Shoulder Exoskeleton: A Case Study
Authors: Jin Tian, Baichun Wei, Chifu Yang, Suo Luo, Jiadong Feng, Ping Li, Changbing Chen, Yingjie Liu, Haiqi Zhu, Chunzhi Yi
Abstract: Objective: Shoulder exoskeletons can effectively assist with overhead work. However, their impacts on muscle synergy remain unclear. The objective is to systematically investigate the effects of the shoulder exoskeleton on muscle synergies during overhead work. Methods: Eight male participants were recruited to perform a screwing task both with (Intervention) and without (Normal) the exoskeleton. Eight muscles were monitored and muscle synergies were extracted using non-negative matrix factorization and electromyographic topographic maps. Results: The number of synergies extracted was the same (n = 2) in both conditions. Specifically, the first synergies in both conditions were identical, with the highest weight of AD and MD, while the second synergies differed between conditions, with the highest weight of PM and MD, respectively. As for the first synergy in the Intervention condition, the activation profile significantly decreased, and the average recruitment level and activation duration were significantly lower (p<0.05). The regression analysis for the muscle synergies across conditions shows the changes of muscle synergies did not influence the sparseness of muscle synergies (p=0.7341). In the topographic maps, the mean value exhibited a significant decrease (p<0.001) and the entropy significantly increased (p<0.01). Conclusion: The exoskeleton does not alter the number of synergies and existing major synergies but may induce new synergies. It can also significantly decrease neural activation and may influence the heterogeneity of the distribution of monitored muscle activations. Significance: This study provides insights into the potential mechanisms of exoskeleton-assisted overhead work and guidance on improving the performance of exoskeletons.
Submitted 23 November, 2024; originally announced November 2024.
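
The synergy-extraction step mentioned here (non-negative matrix factorization of EMG envelopes) can be sketched with standard tooling; the matrix shape, the random data, and the choice of two components below are illustrative assumptions, not the study's recordings:

import numpy as np
from sklearn.decomposition import NMF

# Illustrative EMG envelope matrix: 8 monitored muscles x 500 time samples,
# non-negative by construction (rectified and low-pass filtered in practice).
rng = np.random.default_rng(0)
emg = rng.random((8, 500))

# Factor EMG ~= W @ H: W (8 x k) holds the muscle weightings of each synergy,
# H (k x 500) holds the corresponding activation profiles over time.
model = NMF(n_components=2, init="nndsvda", max_iter=1000, random_state=0)
W = model.fit_transform(emg)
H = model.components_

# Variance accounted for (VAF), a common criterion for choosing k.
vaf = 1 - np.linalg.norm(emg - W @ H) ** 2 / np.linalg.norm(emg) ** 2
print(W.shape, H.shape, round(vaf, 3))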

7. arXiv:2411.14713 [pdf, other] - cs.IR (Information Retrieval), cs.AI (Artificial Intelligence)
LIBER: Lifelong User Behavior Modeling Based on Large Language Models
Authors: Chenxu Zhu, Shigang Quan, Bo Chen, Jianghao Lin, Xiaoling Cai, Hong Zhu, Xiangyang Li, Yunjia Xi, Weinan Zhang, Ruiming Tang
Abstract: CTR prediction plays a vital role in recommender systems. Recently, large language models (LLMs) have been applied in recommender systems due to their emergent abilities. While leveraging semantic information from LLMs has shown some improvements in the performance of recommender systems, two notable limitations persist in these studies. First, LLM-enhanced recommender systems encounter challenges in extracting valuable information from lifelong user behavior sequences within textual contexts for recommendation tasks. Second, the inherent variability in human behaviors leads to a constant stream of new behaviors and irregularly fluctuating user interests. This characteristic imposes two significant challenges on existing models. On the one hand, it presents difficulties for LLMs in effectively capturing the dynamic shifts in user interests within these sequences, and on the other hand, there exists the issue of substantial computational overhead if the LLMs necessitate recurrent calls upon each update to the user sequences. In this work, we propose Lifelong User Behavior Modeling (LIBER) based on large language models, which includes three modules: (1) User Behavior Streaming Partition (UBSP), (2) User Interest Learning (UIL), and (3) User Interest Fusion (UIF). Initially, UBSP is employed to condense lengthy user behavior sequences into shorter partitions in an incremental paradigm, facilitating more efficient processing. Subsequently, UIL leverages LLMs in a cascading way to infer insights from these partitions. Finally, UIF integrates the textual outputs generated by the aforementioned processes to construct a comprehensive representation, which can be incorporated by any recommendation model to enhance performance. LIBER has been deployed on Huawei's music recommendation service and achieved substantial improvements in users' play count and play time by 3.01% and 7.69%.
Submitted 21 November, 2024; originally announced November 2024.

8. arXiv:2411.14519 [pdf, other] - cs.RO (Robotics)
Tra-MoE: Learning Trajectory Prediction Model from Multiple Domains for Adaptive Policy Conditioning
Authors: Jiange Yang, Haoyi Zhu, Yating Wang, Gangshan Wu, Tong He, Limin Wang
Abstract: Learning from multiple domains is a primary factor that influences the generalization of a single unified robot system. In this paper, we aim to learn the trajectory prediction model by using broad out-of-domain data to improve its performance and generalization ability. The trajectory model is designed to predict any-point trajectories in the current frame given an instruction and can provide detailed control guidance for robotic policy learning. To handle the diverse out-of-domain data distribution, we propose a sparsely-gated MoE (Top-1 gating strategy) architecture for the trajectory model, coined Tra-MoE. The sparse activation design enables a good balance between parameter cooperation and specialization, effectively benefiting from large-scale out-of-domain data while maintaining constant FLOPs per token. In addition, we further introduce an adaptive policy conditioning technique by learning 2D mask representations for predicted trajectories, which is explicitly aligned with image observations to guide action prediction more flexibly. We perform extensive experiments on both simulation and real-world scenarios to verify the effectiveness of Tra-MoE and the adaptive policy conditioning technique. We also conduct a comprehensive empirical study to train Tra-MoE, demonstrating that our Tra-MoE consistently exhibits superior performance compared to the dense baseline model, even when the latter is scaled to match Tra-MoE's parameter count.
Submitted 21 November, 2024; originally announced November 2024.
Comments: 15 pages, 5 figures
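
A minimal sketch of a Top-1 sparsely-gated MoE layer in the spirit of the abstract, assuming hypothetical dimensions and a plain two-layer MLP per expert (the paper's actual expert design and router are not specified here):

import torch
import torch.nn as nn
import torch.nn.functional as F

class Top1MoE(nn.Module):
    """Sparsely-gated mixture-of-experts with Top-1 routing: each token is
    processed by exactly one expert, so FLOPs per token stay constant as
    the number of experts grows."""
    def __init__(self, dim, hidden, num_experts):
        super().__init__()
        self.gate = nn.Linear(dim, num_experts)  # router
        self.experts = nn.ModuleList([
            nn.Sequential(nn.Linear(dim, hidden), nn.GELU(), nn.Linear(hidden, dim))
            for _ in range(num_experts)
        ])

    def forward(self, x):                         # x: (tokens, dim)
        probs = F.softmax(self.gate(x), dim=-1)
        weight, idx = probs.max(dim=-1)           # Top-1 expert per token
        out = torch.zeros_like(x)
        for e, expert in enumerate(self.experts):
            mask = idx == e
            if mask.any():
                # scale by the gate probability so the router receives gradient
                out[mask] = weight[mask].unsqueeze(-1) * expert(x[mask])
        return out

# usage sketch
layer = Top1MoE(dim=256, hidden=1024, num_experts=4)
tokens = torch.randn(10, 256)
print(layer(tokens).shape)  # torch.Size([10, 256])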

9. arXiv:2411.13770 [pdf, other] - cs.RO (Robotics)
A Novel Passive Occupational Shoulder Exoskeleton With Adjustable Peak Assistive Torque Angle For Overhead Tasks
Authors: Jin Tian, Haiqi Zhu, Changjia Lu, Chifu Yang, Yingjie Liu, Baichun Wei, Chunzhi Yi
Abstract: Objective: Overhead tasks are a primary inducement to work-related musculoskeletal disorders. Aiming to reduce shoulder physical loads, passive shoulder exoskeletons are increasingly prevalent in the industry due to their light weight, affordability, and effectiveness. However, they can only accommodate a specific task and cannot effectively balance between compactness and sufficient range of motion. Method: We proposed a novel passive occupational shoulder exoskeleton to handle various overhead tasks with different arm elevation angles, ensuring a sufficient range of motion while remaining compact. By formulating kinematic models and simulations, an ergonomic shoulder structure was developed. Then, we presented a torque generator equipped with an adjustable peak assistive torque angle to switch between low and high assistance phases through a passive clutch mechanism. Ten healthy participants were recruited to validate its functionality by performing the screwing task. Results: Measured range of motion results demonstrated that the exoskeleton can ensure a sufficient ROM in both sagittal (164°) and horizontal (158°) flexion/extension movements. The experimental results of the screwing task showed that the exoskeleton could reduce muscle activation (up to 49.6%), perceived effort and frustration, and provide an improved user experience (scored 79.7 out of 100). Conclusion: These results indicate that the proposed exoskeleton can guarantee natural movements and provide efficient assistance during overhead work, and thus has the potential to reduce the risk of musculoskeletal disorders. Significance: The proposed exoskeleton provides insights into multi-task adaptability and efficient assistance, highlighting the potential for expanding the application of exoskeletons.
Submitted 23 November, 2024; v1 submitted 20 November, 2024; originally announced November 2024.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13291">arXiv:2411.13291</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13291">pdf</a>, <a href="https://arxiv.org/format/2411.13291">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DATAP-SfM: Dynamic-Aware Tracking Any Point for Robust Structure from Motion in the Wild </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ye%2C+W">Weicai Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xinyu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhan%2C+R">Ruohao Zhan</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+D">Di Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+X">Xiaoshui Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Haoyi Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+H">Hujun Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Ouyang%2C+W">Wanli Ouyang</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+T">Tong He</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+G">Guofeng Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13291v1-abstract-short" style="display: inline;"> This paper proposes a concise, elegant, and robust pipeline to estimate smooth camera trajectories and obtain dense point clouds for casual videos in the wild. Traditional frameworks, such as ParticleSfM~\cite{zhao2022particlesfm}, address this problem by sequentially computing the optical flow between adjacent frames to obtain point trajectories. They then remove dynamic trajectories through moti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13291v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13291v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13291v1-abstract-full" style="display: none;"> This paper proposes a concise, elegant, and robust pipeline to estimate smooth camera trajectories and obtain dense point clouds for casual videos in the wild. Traditional frameworks, such as ParticleSfM~\cite{zhao2022particlesfm}, address this problem by sequentially computing the optical flow between adjacent frames to obtain point trajectories. They then remove dynamic trajectories through motion segmentation and perform global bundle adjustment. However, the process of estimating optical flow between two adjacent frames and chaining the matches can introduce cumulative errors. Additionally, motion segmentation combined with single-view depth estimation often faces challenges related to scale ambiguity. To tackle these challenges, we propose a dynamic-aware tracking any point (DATAP) method that leverages consistent video depth and point tracking. Specifically, our DATAP addresses these issues by estimating dense point tracking across the video sequence and predicting the visibility and dynamics of each point. By incorporating the consistent video depth prior, the performance of motion segmentation is enhanced. 
With the integration of DATAP, it becomes possible to estimate and optimize all camera poses simultaneously by performing global bundle adjustments for point tracking classified as static and visible, rather than relying on incremental camera registration. Extensive experiments on dynamic sequences, e.g., Sintel and TUM RGBD dynamic sequences, and on the wild video, e.g., DAVIS, demonstrate that the proposed method achieves state-of-the-art performance in terms of camera pose estimation even in complex dynamic challenge scenes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13291v1-abstract-full').style.display = 'none'; document.getElementById('2411.13291v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12791">arXiv:2411.12791</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12791">pdf</a>, <a href="https://arxiv.org/format/2411.12791">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Mitigating Perception Bias: A Training-Free Approach to Enhance LMM for Image Quality Assessment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pan%2C+S">Siyi Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+B">Baoliang Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+D">Danni Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hanwei Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+L">Lingyu Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Sui%2C+X">Xiangjie Sui</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shiqi Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12791v1-abstract-short" style="display: inline;"> Despite the impressive performance of large multimodal models (LMMs) in high-level visual tasks, their capacity for image quality assessment (IQA) remains limited. One main reason is that LMMs are primarily trained for high-level tasks (e.g., image captioning), emphasizing unified image semantics extraction under varied quality. Such semantic-aware yet quality-insensitive perception bias inevitabl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12791v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12791v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12791v1-abstract-full" style="display: none;"> Despite the impressive performance of large multimodal models (LMMs) in high-level visual tasks, their capacity for image quality assessment (IQA) remains limited. 
One main reason is that LMMs are primarily trained for high-level tasks (e.g., image captioning), emphasizing unified image semantics extraction under varied quality. Such semantic-aware yet quality-insensitive perception bias inevitably leads to a heavy reliance on image semantics when these LMMs are forced to rate image quality. In this paper, instead of costly retraining or tuning of an LMM, we propose a training-free debiasing framework, in which the image quality prediction is rectified by mitigating the bias caused by image semantics. Specifically, we first explore several semantic-preserving distortions that can significantly degrade image quality while maintaining identifiable semantics. By applying these specific distortions to the query or test images, we ensure that the degraded images are recognized as poor quality while their semantics remain. During quality inference, both a query image and its corresponding degraded version are fed to the LMM along with a prompt indicating that the query image quality should be inferred under the condition that the degraded one is deemed poor quality. This prior condition effectively aligns the LMM&#39;s quality perception, as all degraded images are consistently rated as poor quality, regardless of their semantic difference. Finally, the quality scores of the query image inferred under different prior conditions (degraded versions) are aggregated using a conditional probability model. Extensive experiments on various IQA datasets show that our debiasing framework can consistently enhance LMM performance, and the code will be publicly available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12791v1-abstract-full').style.display = 'none'; document.getElementById('2411.12791v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11406">arXiv:2411.11406</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11406">pdf</a>, <a href="https://arxiv.org/format/2411.11406">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Bridging the Resource Gap: Deploying Advanced Imitation Learning Models onto Affordable Embedded Platforms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ge%2C+H">Haizhou Ge</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Ruixiang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Z">Zhu-ang Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongrui Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+R">Ruichen Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+Y">Yuhang Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+Z">Zeyu Pang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+G">Guyue Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Junyu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+L">Lu Shi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11406v1-abstract-short" style="display: inline;"> Advanced imitation learning with structures like the transformer is increasingly demonstrating its advantages in robotics. However, deploying these large-scale models on embedded platforms remains a major challenge. In this paper, we propose a pipeline that facilitates the migration of advanced imitation learning algorithms to edge devices. The process is achieved via an efficient model compressio&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11406v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11406v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11406v1-abstract-full" style="display: none;"> Advanced imitation learning with structures like the transformer is increasingly demonstrating its advantages in robotics. However, deploying these large-scale models on embedded platforms remains a major challenge. In this paper, we propose a pipeline that facilitates the migration of advanced imitation learning algorithms to edge devices. The process is achieved via an efficient model compression method and a practical asynchronous parallel method Temporal Ensemble with Dropped Actions (TEDA) that enhances the smoothness of operations. To show the efficiency of the proposed pipeline, large-scale imitation learning models are trained on a server and deployed on an edge device to complete various manipulation tasks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11406v1-abstract-full').style.display = 'none'; document.getElementById('2411.11406v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by the 2024 IEEE International Conference on Robotics and Biomimetics (IEEE ROBIO 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10821">arXiv:2411.10821</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10821">pdf</a>, <a href="https://arxiv.org/format/2411.10821">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> </div> </div> <p class="title is-5 mathjax"> GeomCLIP: Contrastive Geometry-Text Pre-training for Molecules </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+T">Teng Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+C">Chao Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Huaisheng Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Honavar%2C+V+G">Vasant G. Honavar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10821v1-abstract-short" style="display: inline;"> Pretraining molecular representations is crucial for drug and material discovery. Recent methods focus on learning representations from geometric structures, effectively capturing 3D position information. Yet, they overlook the rich information in biomedical texts, which detail molecules&#39; properties and substructures. With this in mind, we set up a data collection effort for 200K pairs of ground-s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10821v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10821v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10821v1-abstract-full" style="display: none;"> Pretraining molecular representations is crucial for drug and material discovery. Recent methods focus on learning representations from geometric structures, effectively capturing 3D position information. Yet, they overlook the rich information in biomedical texts, which detail molecules&#39; properties and substructures. With this in mind, we set up a data collection effort for 200K pairs of ground-state geometric structures and biomedical texts, resulting in a PubChem3D dataset. Based on this dataset, we propose the GeomCLIP framework to enhance multi-modal representation learning from molecular structures and biomedical text. 
During pre-training, we design two types of tasks, i.e., multimodal representation alignment and unimodal denoising pretraining, to align the 3D geometric encoder with textual information and, at the same time, preserve its original representation power. Experimental results show the effectiveness of GeomCLIP in various tasks such as molecular property prediction, zero-shot text-molecule retrieval, and 3D molecule captioning. Our code and collected dataset are available at \url{https://github.com/xiaocui3737/GeomCLIP} <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10821v1-abstract-full').style.display = 'none'; document.getElementById('2411.10821v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">BIBM 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09850">arXiv:2411.09850</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09850">pdf</a>, <a href="https://arxiv.org/format/2411.09850">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Enhancing Diffusion Posterior Sampling for Inverse Problems by Integrating Crafted Measurements </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+S">Shijie Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Huaisheng Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+R">Rohan Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+R">Ruiyi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+K">Kaiyi Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Changyou Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09850v1-abstract-short" style="display: inline;"> Diffusion models have emerged as a powerful foundation model for visual generation. With an appropriate sampling process, it can effectively serve as a generative prior to solve general inverse problems. Current posterior sampling based methods take the measurement (i.e., degraded image sample) into the posterior sampling to infer the distribution of the target data (i.e., clean image sample). 
How&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09850v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09850v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09850v1-abstract-full" style="display: none;"> Diffusion models have emerged as a powerful foundation model for visual generation. With an appropriate sampling process, it can effectively serve as a generative prior to solve general inverse problems. Current posterior sampling based methods take the measurement (i.e., degraded image sample) into the posterior sampling to infer the distribution of the target data (i.e., clean image sample). However, in this manner, we show that high-frequency information can be prematurely introduced during the early stages, which could induce larger posterior estimate errors during the restoration sampling. To address this issue, we first reveal that forming the log posterior gradient with the noisy measurement ( i.e., samples from a diffusion forward process) instead of the clean one can benefit the reverse process. Consequently, we propose a novel diffusion posterior sampling method DPS-CM, which incorporates a Crafted Measurement (i.e., samples generated by a reverse denoising process, compared to random sampling with noise in standard methods) to form the posterior estimate. This integration aims to mitigate the misalignment with the diffusion prior caused by cumulative posterior estimate errors. Experimental results demonstrate that our approach significantly improves the overall capacity to solve general and noisy inverse problems, such as Gaussian deblurring, super-resolution, inpainting, nonlinear deblurring, and tasks with Poisson noise, relative to existing approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09850v1-abstract-full').style.display = 'none'; document.getElementById('2411.09850v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06542">arXiv:2411.06542</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06542">pdf</a>, <a href="https://arxiv.org/format/2411.06542">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Is Linear Feedback on Smoothed Dynamics Sufficient for Stabilizing Contact-Rich Plans? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shirai%2C+Y">Yuki Shirai</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+T">Tong Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Suh%2C+H+J+T">H. J. 
Terry Suh</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Huaijiang Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Ni%2C+X">Xinpei Ni</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jiuguang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Simchowitz%2C+M">Max Simchowitz</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+T">Tao Pang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06542v2-abstract-short" style="display: inline;"> Designing planners and controllers for contact-rich manipulation is extremely challenging as contact violates the smoothness conditions that many gradient-based controller synthesis tools assume. Contact smoothing approximates a non-smooth system with a smooth one, allowing one to use these synthesis tools more effectively. However, applying classical control synthesis methods to smoothed contact&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06542v2-abstract-full').style.display = 'inline'; document.getElementById('2411.06542v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06542v2-abstract-full" style="display: none;"> Designing planners and controllers for contact-rich manipulation is extremely challenging as contact violates the smoothness conditions that many gradient-based controller synthesis tools assume. Contact smoothing approximates a non-smooth system with a smooth one, allowing one to use these synthesis tools more effectively. However, applying classical control synthesis methods to smoothed contact dynamics remains relatively under-explored. This paper analyzes the efficacy of linear controller synthesis using differential simulators based on contact smoothing. We introduce natural baselines for leveraging contact smoothing to compute (a) open-loop plans robust to uncertain conditions and/or dynamics, and (b) feedback gains to stabilize around open-loop plans. Using robotic bimanual whole-body manipulation as a testbed, we perform extensive empirical experiments on over 300 trajectories and analyze why LQR seems insufficient for stabilizing contact-rich plans. The video summarizing this paper and hardware experiments is found here: https://youtu.be/HLaKi6qbwQg?si=_zCAmBBD6rGSitm9. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06542v2-abstract-full').style.display = 'none'; document.getElementById('2411.06542v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under review for ICRA2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06096">arXiv:2411.06096</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06096">pdf</a>, <a href="https://arxiv.org/format/2411.06096">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> ZhoBLiMP: a Systematic Assessment of Language Models with Linguistic Minimal Pairs in Chinese </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yikang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+Y">Yeting Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongao Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+L">Lilong Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Qian%2C+Z">Zhiheng Qian</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+S">Siyuan Song</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+K">Kejia Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+J">Jialong Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+P">Pei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+B">Baosong Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Rui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+H">Hai Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06096v1-abstract-short" style="display: inline;"> Whether and how language models (LMs) acquire the syntax of natural languages has been widely evaluated under the minimal pair paradigm. However, a lack of wide-coverage benchmarks in languages other than English has constrained systematic investigations into the issue. Addressing it, we first introduce ZhoBLiMP, the most comprehensive benchmark of linguistic minimal pairs for Chinese to date, wit&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06096v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06096v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06096v1-abstract-full" style="display: none;"> Whether and how language models (LMs) acquire the syntax of natural languages has been widely evaluated under the minimal pair paradigm. However, a lack of wide-coverage benchmarks in languages other than English has constrained systematic investigations into the issue. Addressing it, we first introduce ZhoBLiMP, the most comprehensive benchmark of linguistic minimal pairs for Chinese to date, with 118 paradigms, covering 15 linguistic phenomena. We then train 20 LMs of different sizes (14M to 1.4B) on Chinese corpora of various volumes (100M to 3B tokens) and evaluate them along with 14 off-the-shelf LLMs on ZhoBLiMP. The overall results indicate that Chinese grammar can be mostly learned by models with around 500M parameters, trained on 1B tokens with one epoch, showing limited benefits for further scaling. 
Most (N=95) linguistic paradigms are of easy or medium difficulty for LMs, while there are still 13 paradigms that remain challenging even for models with up to 32B parameters. In regard to how LMs acquire Chinese grammar, we observe a U-shaped learning pattern in several phenomena, similar to those observed in child language acquisition. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06096v1-abstract-full').style.display = 'none'; document.getElementById('2411.06096v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05706">arXiv:2411.05706</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05706">pdf</a>, <a href="https://arxiv.org/format/2411.05706">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Image2Text2Image: A Novel Framework for Label-Free Evaluation of Image-to-Text Generation with Text-to-Image Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+J">Jia-Hong Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongyi Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+Y">Yixian Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Rudinac%2C+S">Stevan Rudinac</a>, <a href="/search/cs?searchtype=author&amp;query=Kanoulas%2C+E">Evangelos Kanoulas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05706v1-abstract-short" style="display: inline;"> Evaluating the quality of automatically generated image descriptions is a complex task that requires metrics capturing various dimensions, such as grammaticality, coverage, accuracy, and truthfulness. Although human evaluation provides valuable insights, its cost and time-consuming nature pose limitations. Existing automated metrics like BLEU, ROUGE, METEOR, and CIDEr attempt to fill this gap, but&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05706v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05706v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05706v1-abstract-full" style="display: none;"> Evaluating the quality of automatically generated image descriptions is a complex task that requires metrics capturing various dimensions, such as grammaticality, coverage, accuracy, and truthfulness. Although human evaluation provides valuable insights, its cost and time-consuming nature pose limitations. Existing automated metrics like BLEU, ROUGE, METEOR, and CIDEr attempt to fill this gap, but they often exhibit weak correlations with human judgment. 
To address this challenge, we propose a novel evaluation framework called Image2Text2Image, which leverages diffusion models, such as Stable Diffusion or DALL-E, for text-to-image generation. In the Image2Text2Image framework, an input image is first processed by a selected image captioning model, chosen for evaluation, to generate a textual description. Using this generated description, a diffusion model then creates a new image. By comparing features extracted from the original and generated images, we measure their similarity using a designated similarity metric. A high similarity score suggests that the model has produced a faithful textual description, while a low score highlights discrepancies, revealing potential weaknesses in the model&#39;s performance. Notably, our framework does not rely on human-annotated reference captions, making it a valuable tool for assessing image captioning models. Extensive experiments and human evaluations validate the efficacy of our proposed Image2Text2Image evaluation framework. The code and dataset will be published to support further research in the community. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05706v1-abstract-full').style.display = 'none'; document.getElementById('2411.05706v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2408.01723</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05292">arXiv:2411.05292</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05292">pdf</a>, <a href="https://arxiv.org/format/2411.05292">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> SimpleBEV: Improved LiDAR-Camera Fusion Architecture for 3D Object Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yun Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+Z">Zhan Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+P">Peiru Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hong Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+S">Shaohua Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05292v1-abstract-short" style="display: inline;"> More and more research works fuse the LiDAR and camera information to improve the 3D object detection of the autonomous driving system. Recently, a simple yet effective fusion framework has achieved an excellent detection performance, fusing the LiDAR and camera features in a unified bird&#39;s-eye-view (BEV) space. 
In this paper, we propose a LiDAR-camera fusion framework, named SimpleBEV, for accura&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05292v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05292v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05292v1-abstract-full" style="display: none;"> More and more research works fuse the LiDAR and camera information to improve the 3D object detection of the autonomous driving system. Recently, a simple yet effective fusion framework has achieved an excellent detection performance, fusing the LiDAR and camera features in a unified bird&#39;s-eye-view (BEV) space. In this paper, we propose a LiDAR-camera fusion framework, named SimpleBEV, for accurate 3D object detection, which follows the BEV-based fusion framework and improves the camera and LiDAR encoders, respectively. Specifically, we perform the camera-based depth estimation using a cascade network and rectify the depth results with the depth information derived from the LiDAR points. Meanwhile, an auxiliary branch that implements the 3D object detection using only the camera-BEV features is introduced to exploit the camera information during the training phase. Besides, we improve the LiDAR feature extractor by fusing the multi-scaled sparse convolutional features. Experimental results demonstrate the effectiveness of our proposed method. Our method achieves 77.6\% NDS accuracy on the nuScenes dataset, showcasing superior performance in the 3D object detection track. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05292v1-abstract-full').style.display = 'none'; document.getElementById('2411.05292v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04907">arXiv:2411.04907</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04907">pdf</a>, <a href="https://arxiv.org/format/2411.04907">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Enhancing Missing Data Imputation through Combined Bipartite Graph and Complete Directed Graph </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhaoyang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongtu Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Ziqi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yingjie Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Shu%2C+H">Hai Shu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04907v1-abstract-short" style="display: inline;"> In this paper, we aim to address a significant challenge in the field of missing data imputation: identifying and leveraging the interdependencies among features to enhance missing data imputation for tabular data. We introduce a novel framework named the Bipartite and Complete Directed Graph Neural Network (BCGNN). Within BCGNN, observations and features are differentiated as two distinct node ty&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04907v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04907v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04907v1-abstract-full" style="display: none;"> In this paper, we aim to address a significant challenge in the field of missing data imputation: identifying and leveraging the interdependencies among features to enhance missing data imputation for tabular data. We introduce a novel framework named the Bipartite and Complete Directed Graph Neural Network (BCGNN). Within BCGNN, observations and features are differentiated as two distinct node types, and the values of observed features are converted into attributed edges linking them. The bipartite segment of our framework inductively learns embedding representations for nodes, efficiently utilizing the comprehensive information encapsulated in the attributed edges. In parallel, the complete directed graph segment adeptly outlines and communicates the complex interdependencies among features. When compared to contemporary leading imputation methodologies, BCGNN consistently outperforms them, achieving a noteworthy average reduction of 15% in mean absolute error for feature imputation tasks under different missing mechanisms. Our extensive experimental investigation confirms that an in-depth grasp of the interdependence structure substantially enhances the model&#39;s feature embedding ability. We also highlight the model&#39;s superior performance in label prediction tasks involving missing data, and its formidable ability to generalize to unseen data points. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04907v1-abstract-full').style.display = 'none'; document.getElementById('2411.04907v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04899">arXiv:2411.04899</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04899">pdf</a>, <a href="https://arxiv.org/format/2411.04899">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Sampling-guided Heterogeneous Graph Neural Network with Temporal Smoothing for Scalable Longitudinal Data Imputation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhaoyang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Ziqi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Q">Qiao Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+J">Jinhan Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongtu Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04899v1-abstract-short" style="display: inline;"> In this paper, we propose a novel framework, the Sampling-guided Heterogeneous Graph Neural Network (SHT-GNN), to effectively tackle the challenge of missing data imputation in longitudinal studies. Unlike traditional methods, which often require extensive preprocessing to handle irregular or inconsistent missing data, our approach accommodates arbitrary missing data patterns while maintaining com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04899v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04899v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04899v1-abstract-full" style="display: none;"> In this paper, we propose a novel framework, the Sampling-guided Heterogeneous Graph Neural Network (SHT-GNN), to effectively tackle the challenge of missing data imputation in longitudinal studies. Unlike traditional methods, which often require extensive preprocessing to handle irregular or inconsistent missing data, our approach accommodates arbitrary missing data patterns while maintaining computational efficiency. SHT-GNN models both observations and covariates as distinct node types, connecting observation nodes at successive time points through subject-specific longitudinal subnetworks, while covariate-observation interactions are represented by attributed edges within bipartite graphs. By leveraging subject-wise mini-batch sampling and a multi-layer temporal smoothing mechanism, SHT-GNN efficiently scales to large datasets, while effectively learning node representations and imputing missing data. 
Extensive experiments on both synthetic and real-world datasets, including the Alzheimer&#39;s Disease Neuroimaging Initiative (ADNI) dataset, demonstrate that SHT-GNN significantly outperforms existing imputation methods, even with high missing data rates. The empirical results highlight SHT-GNN&#39;s robust imputation capabilities and superior performance, particularly in the context of complex, large-scale longitudinal data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04899v1-abstract-full').style.display = 'none'; document.getElementById('2411.04899v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04711">arXiv:2411.04711</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04711">pdf</a>, <a href="https://arxiv.org/format/2411.04711">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Progressive Multi-Level Alignments for Semi-Supervised Domain Adaptation SAR Target Recognition Using Simulated Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xinzheng Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hui Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhuang%2C+H">Hongqian Zhuang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04711v1-abstract-short" style="display: inline;"> Recently, an intriguing research trend for automatic target recognition (ATR) from synthetic aperture radar (SAR) imagery has arisen: using simulated data to train ATR models is a feasible solution to the issue of inadequate measured data. To close the domain gap that exists between the real and simulated data, the unsupervised domain adaptation (UDA) techniques are frequently exploited to constru&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04711v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04711v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04711v1-abstract-full" style="display: none;"> Recently, an intriguing research trend for automatic target recognition (ATR) from synthetic aperture radar (SAR) imagery has arisen: using simulated data to train ATR models is a feasible solution to the issue of inadequate measured data. To close the domain gap that exists between the real and simulated data, the unsupervised domain adaptation (UDA) techniques are frequently exploited to construct ATR models. However, for UDA, the target domain lacks labeled data to direct the model training, posing a great challenge to ATR performance. 
To address the above problem, a semi-supervised domain adaptation (SSDA) framework has been proposed adopting progressive multi-level alignments for simulated data-aided SAR ATR. First, a progressive wavelet transform data augmentation (PWTDA) is presented by analyzing the discrepancies of wavelet decomposition sub-bands of two domain images, obtaining the domain-level alignment. Specifically, the domain gap is narrowed by mixing the wavelet transform high-frequency sub-band components. Second, we develop an asymptotic instance-prototype alignment (AIPA) strategy to push the source domain instances close to the corresponding target prototypes, aiming to achieve category-level alignment. Moreover, the consistency alignment is implemented by excavating the strong-weak augmentation consistency of both individual samples and the multi-sample relationship, enhancing the generalization capability of the model. Extensive experiments on the Synthetic and Measured Paired Labeled Experiment (SAMPLE) dataset, indicate that our approach obtains recognition accuracies of 99.63% and 98.91% in two common experimental settings with only one labeled sample per class of the target domain, outperforming the most advanced SSDA techniques. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04711v1-abstract-full').style.display = 'none'; document.getElementById('2411.04711v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04571">arXiv:2411.04571</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04571">pdf</a>, <a href="https://arxiv.org/format/2411.04571">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DomainGallery: Few-shot Domain-driven Image Generation by Attribute-centric Finetuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+Y">Yuxuan Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+Y">Yan Hong</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+B">Bo Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Lan%2C+J">Jun Lan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Huijia Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Weiqiang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jianfu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Niu%2C+L">Li Niu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Liqing Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04571v1-abstract-short" style="display: inline;"> The recent progress in text-to-image models pretrained on large-scale datasets has enabled us to generate various images as long as we provide a text prompt describing what we want. 
Nevertheless, the availability of these models is still limited when we expect to generate images that fall into a specific domain either hard to describe or just unseen to the models. In this work, we propose DomainGa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04571v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04571v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04571v1-abstract-full" style="display: none;"> The recent progress in text-to-image models pretrained on large-scale datasets has enabled us to generate various images as long as we provide a text prompt describing what we want. Nevertheless, the availability of these models is still limited when we expect to generate images that fall into a specific domain either hard to describe or just unseen to the models. In this work, we propose DomainGallery, a few-shot domain-driven image generation method which aims at finetuning pretrained Stable Diffusion on few-shot target datasets in an attribute-centric manner. Specifically, DomainGallery features prior attribute erasure, attribute disentanglement, regularization and enhancement. These techniques are tailored to few-shot domain-driven generation in order to solve key issues that previous works have failed to settle. Extensive experiments are given to validate the superior performance of DomainGallery on a variety of domain-driven generation scenarios. Codes are available at https://github.com/Ldhlwh/DomainGallery. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04571v1-abstract-full').style.display = 'none'; document.getElementById('2411.04571v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03839">arXiv:2411.03839</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03839">pdf</a>, <a href="https://arxiv.org/format/2411.03839">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> </div> </div> <p class="title is-5 mathjax"> Noisy Linear Group Testing: Exact Thresholds and Efficient Algorithms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hintze%2C+L">Lukas Hintze</a>, <a href="/search/cs?searchtype=author&amp;query=Krieg%2C+L">Lena Krieg</a>, <a href="/search/cs?searchtype=author&amp;query=Scheftelowitsch%2C+O">Olga Scheftelowitsch</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Haodong Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03839v2-abstract-short" style="display: inline;"> In group testing, the task is to identify defective items by testing groups of them together using as few tests as possible. We consider the setting where each item is defective with a constant probability $\alpha$, independent of all other items. In the (over-)idealized noiseless setting, tests are positive exactly if any of the tested items are defective. We study a more realistic model in which obse&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03839v2-abstract-full').style.display = 'inline'; document.getElementById('2411.03839v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03839v2-abstract-full" style="display: none;"> In group testing, the task is to identify defective items by testing groups of them together using as few tests as possible. We consider the setting where each item is defective with a constant probability $\alpha$, independent of all other items. In the (over-)idealized noiseless setting, tests are positive exactly if any of the tested items are defective. We study a more realistic model in which observed test results are subject to noise, i.e., tests can display false positive or false negative results with constant positive probabilities. We determine precise constants $c$ such that $cn\log n$ tests are required to recover the infection status of every individual for both adaptive and non-adaptive group testing: in the former, the selection of groups to test can depend on previously observed test results, whereas it cannot in the latter. Additionally, for both settings, we provide efficient algorithms that identify all defective items with the optimal amount of tests with high probability. Thus, we completely solve the problem of binary noisy group testing in the studied setting. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03839v2-abstract-full').style.display = 'none'; document.getElementById('2411.03839v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 05C80; 62B10; 68P30; 68R05 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> F.2.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03527">arXiv:2411.03527</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03527">pdf</a>, <a href="https://arxiv.org/format/2411.03527">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> PACE: Pacing Operator Learning to Accurate Optical Field Simulation for Complicated Photonic Devices </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hanqing Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Cong%2C+W">Wenyan Cong</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+G">Guojin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Ning%2C+S">Shupeng Ning</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+R+T">Ray T. Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+J">Jiaqi Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+D+Z">David Z. Pan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03527v1-abstract-short" style="display: inline;"> Electromagnetic field simulation is central to designing, optimizing, and validating photonic devices and circuits. However, costly computation associated with numerical simulation poses a significant bottleneck, hindering scalability and turnaround time in the photonic circuit design process. Neural operators offer a promising alternative, but existing SOTA approaches, NeurOLight, struggle with p&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03527v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03527v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03527v1-abstract-full" style="display: none;"> Electromagnetic field simulation is central to designing, optimizing, and validating photonic devices and circuits. However, costly computation associated with numerical simulation poses a significant bottleneck, hindering scalability and turnaround time in the photonic circuit design process. 
Neural operators offer a promising alternative, but existing SOTA approaches, such as NeurOLight, struggle with predicting high-fidelity fields for real-world complicated photonic devices, with a best reported normalized mean absolute error of 0.38 for NeurOLight. The interplay of highly complex light-matter interactions (e.g., scattering and resonance), sensitivity to local structure details, non-uniform learning complexity for full-domain simulation, and rich frequency information contributes to the failure of existing neural PDE solvers. In this work, we boost the prediction fidelity to an unprecedented level for simulating complex photonic devices with a novel operator design driven by the above challenges. We propose a novel cross-axis factorized PACE operator with a strong long-distance modeling capacity to connect the full-domain complex field pattern with local device structures. Inspired by human learning, we further divide and conquer the simulation task for extremely hard cases into two progressively easy tasks, with a first-stage model learning an initial solution refined by a second model. On various complicated photonic device benchmarks, we demonstrate that a single PACE model achieves 73% lower error with 50% fewer parameters compared with various recent ML-based PDE solvers. The two-stage setup further advances high-fidelity simulation for even more intricate cases. In terms of runtime, PACE demonstrates 154-577x and 11.8-12x simulation speedups over numerical solvers using scipy and the highly optimized pardiso solver, respectively. We have open-sourced the code and dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03527v1-abstract-full').style.display = 'none'; document.getElementById('2411.03527v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by NeurIPS 2024, 21 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03404">arXiv:2411.03404</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03404">pdf</a>, <a href="https://arxiv.org/format/2411.03404">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> EVA-S3PC: Efficient, Verifiable, Accurate Secure Matrix Multiplication Protocol Assembly and Its Application in Regression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Peng%2C+S">Shizhao Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+T">Tianrui Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Tao%2C+T">Tianle Tao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+D">Derun Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Sheng%2C+H">Hao Sheng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Haogang Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03404v1-abstract-short" style="display: inline;"> Efficient multi-party secure matrix multiplication is crucial for privacy-preserving machine learning, but existing mixed-protocol frameworks often face challenges in balancing security, efficiency, and accuracy. This paper presents an efficient, verifiable and accurate secure three-party computing (EVA-S3PC) framework that addresses these challenges with elementary 2-party and 3-party matrix oper&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03404v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03404v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03404v1-abstract-full" style="display: none;"> Efficient multi-party secure matrix multiplication is crucial for privacy-preserving machine learning, but existing mixed-protocol frameworks often face challenges in balancing security, efficiency, and accuracy. This paper presents an efficient, verifiable and accurate secure three-party computing (EVA-S3PC) framework that addresses these challenges with elementary 2-party and 3-party matrix operations based on data obfuscation techniques. We propose basic protocols for secure matrix multiplication, inversion, and hybrid multiplication, ensuring privacy and result verifiability. Experimental results demonstrate that EVA-S3PC achieves up to 14 significant decimal digits of precision in Float64 calculations, while reducing communication overhead by up to $54.8\%$ compared to state-of-the-art methods. Furthermore, 3-party regression models trained using EVA-S3PC on vertically partitioned data achieve accuracy nearly identical to plaintext training, which illustrates its potential as a scalable, efficient, and accurate solution for secure collaborative modeling across domains. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03404v1-abstract-full').style.display = 'none'; document.getElementById('2411.03404v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages,22 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02553">arXiv:2411.02553</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02553">pdf</a>, <a href="https://arxiv.org/format/2411.02553">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3636534.3649386">10.1145/3636534.3649386 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Map++: Towards User-Participatory Visual SLAM Systems with Efficient Map Expansion and Sharing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xinran Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hanqi Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+Y">Yifan Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Wuyang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Shangguan%2C+L">Longfei Shangguan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+J">Jianmin Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yanyong Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02553v1-abstract-short" style="display: inline;"> Constructing precise 3D maps is crucial for the development of future map-based systems such as self-driving and navigation. However, generating these maps in complex environments, such as multi-level parking garages or shopping malls, remains a formidable challenge. In this paper, we introduce a participatory sensing approach that delegates map-building tasks to map users, thereby enabling cost-e&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02553v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02553v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02553v1-abstract-full" style="display: none;"> Constructing precise 3D maps is crucial for the development of future map-based systems such as self-driving and navigation. 
However, generating these maps in complex environments, such as multi-level parking garages or shopping malls, remains a formidable challenge. In this paper, we introduce a participatory sensing approach that delegates map-building tasks to map users, thereby enabling cost-effective and continuous data collection. The proposed method harnesses the collective efforts of users, facilitating the expansion and ongoing update of the maps as the environment evolves. We realized this approach by developing Map++, an efficient system that functions as a plug-and-play extension, supporting participatory map-building based on existing SLAM algorithms. Map++ addresses a plethora of scalability issues in this participatory map-building system by proposing a set of lightweight, application-layer protocols. We evaluated Map++ in four representative settings: an indoor garage, an outdoor plaza, a public SLAM benchmark, and a simulated environment. The results demonstrate that Map++ can reduce traffic volume by approximately 46% with negligible degradation in mapping accuracy, i.e., less than 0.03m compared to the baseline system. It can support approximately $2 \times$ as many concurrent users as the baseline under the same network bandwidth. Additionally, users who travel on already-mapped trajectories can directly utilize the existing maps for localization, saving 47% of the CPU usage. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02553v1-abstract-full').style.display = 'none'; document.getElementById('2411.02553v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 15 figures. Accepted by MobiCom 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02446">arXiv:2411.02446</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02446">pdf</a>, <a href="https://arxiv.org/format/2411.02446">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Learning World Models for Unconstrained Goal Navigation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+Y">Yuanlin Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Mao%2C+W">Wensen Mao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">He Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02446v1-abstract-short" style="display: inline;"> Learning world models offers a promising avenue for goal-conditioned reinforcement learning with sparse rewards.
By allowing agents to plan actions or exploratory goals without direct interaction with the environment, world models enhance exploration efficiency. The quality of a world model hinges on the richness of data stored in the agent&#39;s replay buffer, with expectations of reasonable generali&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02446v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02446v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02446v1-abstract-full" style="display: none;"> Learning world models offers a promising avenue for goal-conditioned reinforcement learning with sparse rewards. By allowing agents to plan actions or exploratory goals without direct interaction with the environment, world models enhance exploration efficiency. The quality of a world model hinges on the richness of data stored in the agent&#39;s replay buffer, with expectations of reasonable generalization across the state space surrounding recorded trajectories. However, challenges arise in generalizing learned world models to state transitions backward along recorded trajectories or between states across different trajectories, hindering their ability to accurately model real-world dynamics. To address these challenges, we introduce a novel goal-directed exploration algorithm, MUN (short for &#34;World Models for Unconstrained Goal Navigation&#34;). This algorithm is capable of modeling state transitions between arbitrary subgoal states in the replay buffer, thereby facilitating the learning of policies to navigate between any &#34;key&#34; states. Experimental results demonstrate that MUN strengthens the reliability of world models and significantly improves the policy&#39;s capacity to generalize across new goal settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02446v1-abstract-full').style.display = 'none'; document.getElementById('2411.02446v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS2024 Poster. 
arXiv admin note: substantial text overlap with arXiv:2411.01396</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02310">arXiv:2411.02310</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02310">pdf</a>, <a href="https://arxiv.org/format/2411.02310">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> MdEval: Massively Multilingual Code Debugging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shukai Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chai%2C+L">Linzheng Chai</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Jian Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+J">Jiajun Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">He Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Liran Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+K">Ke Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Wei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hualei Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+S">Shuyue Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+T">Tao Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jiaheng Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+Y">Yunlong Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Hao%2C+Y">Yu Hao</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+L">Liqun Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Niu%2C+G">Guanglin Niu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+G">Ge Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhoujun Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02310v1-abstract-short" style="display: inline;"> Code large language models (LLMs) have made significant progress in code debugging by directly generating the correct code based on the buggy code snippet. Programming benchmarks, typically consisting of buggy code snippet and their associated test cases, are used to assess the debugging capabilities of LLMs. However, many existing benchmarks primarily focus on Python and are often limited in term&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02310v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02310v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02310v1-abstract-full" style="display: none;"> Code large language models (LLMs) have made significant progress in code debugging by directly generating the correct code based on the buggy code snippet. Programming benchmarks, typically consisting of buggy code snippet and their associated test cases, are used to assess the debugging capabilities of LLMs. However, many existing benchmarks primarily focus on Python and are often limited in terms of language diversity (e.g., DebugBench and DebugEval). 
To advance the field of multilingual debugging with LLMs, we propose the first massively multilingual debugging benchmark, which includes 3.6K test samples across 18 programming languages and covers the automated program repair (APR) task, the code review (CR) task, and the bug identification (BI) task. Further, we introduce the debugging instruction corpus MDEVAL-INSTRUCT by injecting bugs into the correct multilingual queries and solutions (xDebugGen). We also train xDebugCoder, a multilingual debugger, on MDEVAL-INSTRUCT as a strong baseline specifically designed to handle bugs across a wide range of programming languages (e.g., &#34;Missing Mut&#34; in Rust and &#34;Misused Macro Definition&#34; in C). Our extensive experiments on MDEVAL reveal a notable performance gap between open-source models and closed-source LLMs (e.g., GPT and Claude series), highlighting huge room for improvement in multilingual code debugging scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02310v1-abstract-full').style.display = 'none'; document.getElementById('2411.02310v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.01791">arXiv:2411.01791</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.01791">pdf</a>, <a href="https://arxiv.org/format/2411.01791">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Minder: Faulty Machine Detection for Large-scale Distributed Model Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Deng%2C+Y">Yangtao Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+X">Xiang Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhuo Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xingjian Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+B">Bo Li</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Z">Zuquan Song</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hang Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+G">Gaohong Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+F">Fuliang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shuguang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Haibin Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+J">Jianxi Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+M">Minlan Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.01791v1-abstract-short" style="display: inline;"> Large-scale distributed model training requires simultaneous training on up to thousands of machines. Faulty machine detection is critical when an unexpected fault occurs in a machine. From our experience, a training task can encounter two faults per day on average, possibly leading to a halt for hours. To address the drawbacks of the time-consuming and labor-intensive manual scrutiny, we propose&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01791v1-abstract-full').style.display = 'inline'; document.getElementById('2411.01791v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.01791v1-abstract-full" style="display: none;"> Large-scale distributed model training requires simultaneous training on up to thousands of machines. Faulty machine detection is critical when an unexpected fault occurs in a machine. From our experience, a training task can encounter two faults per day on average, possibly leading to a halt for hours. To address the drawbacks of the time-consuming and labor-intensive manual scrutiny, we propose Minder, an automatic faulty machine detector for distributed training tasks. The key idea of Minder is to automatically and efficiently detect faulty distinctive monitoring metric patterns, which could last for a period before the entire training task comes to a halt. Minder has been deployed in our production environment for over one year, monitoring daily distributed training tasks where each involves up to thousands of machines. In our real-world fault detection scenarios, Minder can accurately and efficiently react to faults within 3.6 seconds on average, with a precision of 0.904 and F1-score of 0.893. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01791v1-abstract-full').style.display = 'none'; document.getElementById('2411.01791v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.01396">arXiv:2411.01396</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.01396">pdf</a>, <a href="https://arxiv.org/format/2411.01396">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Exploring the Edges of Latent State Clusters for Goal-Conditioned Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duan%2C+Y">Yuanlin Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+G">Guofeng Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">He Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.01396v1-abstract-short" style="display: inline;"> Exploring unknown environments efficiently is a fundamental challenge in unsupervised goal-conditioned reinforcement learning. While selecting exploratory goals at the frontier of previously explored states is an effective strategy, the policy during training may still have limited capability of reaching rare goals on the frontier, resulting in reduced exploratory behavior. We propose &#34;Cluster Edg&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01396v1-abstract-full').style.display = 'inline'; document.getElementById('2411.01396v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.01396v1-abstract-full" style="display: none;"> Exploring unknown environments efficiently is a fundamental challenge in unsupervised goal-conditioned reinforcement learning. While selecting exploratory goals at the frontier of previously explored states is an effective strategy, the policy during training may still have limited capability of reaching rare goals on the frontier, resulting in reduced exploratory behavior. We propose &#34;Cluster Edge Exploration&#34; ($CE^2$), a new goal-directed exploration algorithm that when choosing goals in sparsely explored areas of the state space gives priority to goal states that remain accessible to the agent. The key idea is clustering to group states that are easily reachable from one another by the current policy under training in a latent space and traversing to states holding significant exploration potential on the boundary of these clusters before doing exploratory behavior. In challenging robotics environments including navigating a maze with a multi-legged ant robot, manipulating objects with a robot arm on a cluttered tabletop, and rotating objects in the palm of an anthropomorphic robotic hand, $CE^2$ demonstrates superior efficiency in exploration compared to baseline methods and ablations. 
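<p class="is-size-7">One hedged reading of the cluster-edge idea in the $CE^2$ abstract above: cluster the latent encodings of visited states, then prefer goal states that sit near the boundary between clusters (close to two centroids at once), since those lie at the frontier of what the current policy reaches. The latent encoder, clustering method, and boundary score below are all illustrative assumptions, not the authors' implementation.</p>
<pre><code class="language-python">
# Hedged sketch of boundary-state goal selection in a learned latent space.
import numpy as np
from sklearn.cluster import KMeans

def select_edge_goals(latent_states, n_clusters=8, n_goals=16):
    """latent_states: (N, d) encodings of replay-buffer states. Returns indices of
    states lying near cluster boundaries, used here as candidate exploration goals."""
    km = KMeans(n_clusters=n_clusters, n_init=10, random_state=0).fit(latent_states)
    d = np.linalg.norm(
        latent_states[:, None, :] - km.cluster_centers_[None, :, :], axis=2)
    d_sorted = np.sort(d, axis=1)
    # Small gap between nearest and second-nearest centroid means the state sits
    # on the edge between two clusters.
    boundary_score = d_sorted[:, 1] - d_sorted[:, 0]
    return np.argsort(boundary_score)[:n_goals]

rng = np.random.default_rng(0)
z = rng.normal(size=(512, 32))      # stand-in latent embeddings of visited states
goal_idx = select_edge_goals(z)
print(goal_idx.shape)               # (16,) candidate goal states
</code></pre>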
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01396v1-abstract-full').style.display = 'none'; document.getElementById('2411.01396v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS2024 Poster</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.00635">arXiv:2411.00635</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.00635">pdf</a>, <a href="https://arxiv.org/format/2411.00635">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Variational Neural Stochastic Differential Equations with Change Points </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=El-Laham%2C+Y">Yousef El-Laham</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Z">Zhongchang Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Haibei Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Balch%2C+T">Tucker Balch</a>, <a href="/search/cs?searchtype=author&amp;query=Vyetrenko%2C+S">Svitlana Vyetrenko</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.00635v1-abstract-short" style="display: inline;"> In this work, we explore modeling change points in time-series data using neural stochastic differential equations (neural SDEs). We propose a novel model formulation and training procedure based on the variational autoencoder (VAE) framework for modeling time-series as a neural SDE. Unlike existing algorithms training neural SDEs as VAEs, our proposed algorithm only necessitates a Gaussian prior&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00635v1-abstract-full').style.display = 'inline'; document.getElementById('2411.00635v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.00635v1-abstract-full" style="display: none;"> In this work, we explore modeling change points in time-series data using neural stochastic differential equations (neural SDEs). We propose a novel model formulation and training procedure based on the variational autoencoder (VAE) framework for modeling time-series as a neural SDE. Unlike existing algorithms training neural SDEs as VAEs, our proposed algorithm only necessitates a Gaussian prior of the initial state of the latent stochastic process, rather than a Wiener process prior on the entire latent stochastic process. We develop two methodologies for modeling and estimating change points in time-series data with distribution shifts. 
Our iterative algorithm alternates between updating neural SDE parameters and updating the change points based on either a maximum likelihood-based approach or a change point detection algorithm using the sequential likelihood ratio test. We provide a theoretical analysis of this proposed change point detection scheme. Finally, we present an empirical evaluation that demonstrates the expressive power of our proposed model, showing that it can effectively model both classical parametric SDEs and some real datasets with distribution shifts. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00635v1-abstract-full').style.display = 'none'; document.getElementById('2411.00635v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.23109">arXiv:2410.23109</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.23109">pdf</a>, <a href="https://arxiv.org/format/2410.23109">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Geometry">cs.CG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Graphics">cs.GR</span> </div> </div> <p class="title is-5 mathjax"> NASM: Neural Anisotropic Surface Meshing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+H">Hongbo Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Haikuan Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhong%2C+S">Sikai Zhong</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+N">Ningna Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+C">Cheng Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+X">Xiaohu Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Xin%2C+S">Shiqing Xin</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Wenping Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Hua%2C+J">Jing Hua</a>, <a href="/search/cs?searchtype=author&amp;query=Zhong%2C+Z">Zichun Zhong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.23109v2-abstract-short" style="display: inline;"> This paper introduces a new learning-based method, NASM, for anisotropic surface meshing. Our key idea is to propose a graph neural network to embed an input mesh into a high-dimensional (high-d) Euclidean embedding space to preserve curvature-based anisotropic metric by using a dot product loss between high-d edge vectors. 
This can dramatically reduce the computational time and increase the scala&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23109v2-abstract-full').style.display = 'inline'; document.getElementById('2410.23109v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.23109v2-abstract-full" style="display: none;"> This paper introduces a new learning-based method, NASM, for anisotropic surface meshing. Our key idea is to propose a graph neural network to embed an input mesh into a high-dimensional (high-d) Euclidean embedding space to preserve curvature-based anisotropic metric by using a dot product loss between high-d edge vectors. This can dramatically reduce the computational time and increase the scalability. Then, we propose a novel feature-sensitive remeshing on the generated high-d embedding to automatically capture sharp geometric features. We define a high-d normal metric, and then derive an automatic differentiation on a high-d centroidal Voronoi tessellation (CVT) optimization with the normal metric to simultaneously preserve geometric features and curvature anisotropy that exhibit in the original 3D shapes. To our knowledge, this is the first time that a deep learning framework and a large dataset are proposed to construct a high-d Euclidean embedding space for 3D anisotropic surface meshing. Experimental results are evaluated and compared with the state-of-the-art in anisotropic surface meshing on a large number of surface models from Thingi10K dataset as well as tested on extensive unseen 3D shapes from Multi-Garment Network dataset and FAUST human dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23109v2-abstract-full').style.display = 'none'; document.getElementById('2410.23109v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">SIGGRAPH Asia 2024 (Conference Track)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.21663">arXiv:2410.21663</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.21663">pdf</a>, <a href="https://arxiv.org/format/2410.21663">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ICME57554.2024.10687558">10.1109/ICME57554.2024.10687558 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Discriminative Pedestrian Features and Gated Channel Attention for Clothes-Changing Person Re-Identification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ding%2C+Y">Yongkang Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Mao%2C+R">Rui Mao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hanyue Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+A">Anqi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Liyan Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.21663v1-abstract-short" style="display: inline;"> In public safety and social life, the task of Clothes-Changing Person Re-Identification (CC-ReID) has become increasingly significant. However, this task faces considerable challenges due to appearance changes caused by clothing alterations. Addressing this issue, this paper proposes an innovative method for disentangled feature extraction, effectively extracting discriminative features from pedes&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21663v1-abstract-full').style.display = 'inline'; document.getElementById('2410.21663v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.21663v1-abstract-full" style="display: none;"> In public safety and social life, the task of Clothes-Changing Person Re-Identification (CC-ReID) has become increasingly significant. However, this task faces considerable challenges due to appearance changes caused by clothing alterations. Addressing this issue, this paper proposes an innovative method for disentangled feature extraction, effectively extracting discriminative features from pedestrian images that are invariant to clothing. This method leverages pedestrian parsing techniques to identify and retain features closely associated with individual identity while disregarding the variable nature of clothing attributes. Furthermore, this study introduces a gated channel attention mechanism, which, by adjusting the network&#39;s focus, aids the model in more effectively learning and emphasizing features critical for pedestrian identity recognition. 
Extensive experiments conducted on two standard CC-ReID datasets validate the effectiveness of the proposed approach, with performance surpassing current leading solutions. The Top-1 accuracy under clothing change scenarios on the PRCC and VC-Clothes datasets reached 64.8% and 83.7%, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21663v1-abstract-full').style.display = 'none'; document.getElementById('2410.21663v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The article has been accepted by IEEE International Conference on Multimedia and Expo 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.21157">arXiv:2410.21157</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.21157">pdf</a>, <a href="https://arxiv.org/format/2410.21157">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> M2rc-Eval: Massively Multilingual Repository-level Code Completion Evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jiaheng Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+K">Ken Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Congnan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Jian Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shukai Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">He Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+P">Peng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Chai%2C+L">Linzheng Chai</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yanan Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+K">Ke Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+G">Ge Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zekun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+G">Guoan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Xiang%2C+B">Bangyu Xiang</a>, <a href="/search/cs?searchtype=author&amp;query=Su%2C+W">Wenbo Su</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+B">Bo Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.21157v1-abstract-short" style="display: inline;"> Repository-level code completion has drawn great attention in software engineering, and several benchmark datasets have been introduced. 
However, existing repository-level code completion benchmarks usually focus on a limited number of languages (&lt;5), which cannot evaluate the general code intelligence abilities across different languages for existing code Large Language Models (LLMs). Besides, th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21157v1-abstract-full').style.display = 'inline'; document.getElementById('2410.21157v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.21157v1-abstract-full" style="display: none;"> Repository-level code completion has drawn great attention in software engineering, and several benchmark datasets have been introduced. However, existing repository-level code completion benchmarks usually focus on a limited number of languages (&lt;5), which cannot evaluate the general code intelligence abilities across different languages for existing code Large Language Models (LLMs). Besides, the existing benchmarks usually report overall average scores of different languages, where the fine-grained abilities in different completion scenarios are ignored. Therefore, to facilitate the research of code LLMs in multilingual scenarios, we propose a massively multilingual repository-level code completion benchmark covering 18 programming languages (called M2RC-EVAL), and two types of fine-grained annotations (i.e., bucket-level and semantic-level) on different completion scenarios are provided, where we obtain these annotations based on the parsed abstract syntax tree. Moreover, we also curate a massively multilingual instruction corpora M2RC- INSTRUCT dataset to improve the repository-level code completion abilities of existing code LLMs. Comprehensive experimental results demonstrate the effectiveness of our M2RC-EVAL and M2RC-INSTRUCT. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21157v1-abstract-full').style.display = 'none'; document.getElementById('2410.21157v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20902">arXiv:2410.20902</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.20902">pdf</a>, <a href="https://arxiv.org/format/2410.20902">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> K-step Vector Approximate Survey Propagation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Q">Qun Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Haochuan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Huimin Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20902v1-abstract-short" style="display: inline;"> Approximate Message Passing (AMP), originally developed to address high-dimensional linear inverse problems, has found widespread applications in signal processing and statistical inference. Among its notable variants, Vector Approximate Message Passing (VAMP), Generalized Approximate Survey Propagation (GASP), and Vector Approximate Survey Propagation (VASP) have demonstrated effectiveness even w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20902v1-abstract-full').style.display = 'inline'; document.getElementById('2410.20902v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20902v1-abstract-full" style="display: none;"> Approximate Message Passing (AMP), originally developed to address high-dimensional linear inverse problems, has found widespread applications in signal processing and statistical inference. Among its notable variants, Vector Approximate Message Passing (VAMP), Generalized Approximate Survey Propagation (GASP), and Vector Approximate Survey Propagation (VASP) have demonstrated effectiveness even when the assumed generative models differ from the true models. However, many fundamental questions regarding model mismatch remain unanswered. For instance, it is still unclear what level of model mismatch is required for the postulated posterior estimate (PPE) to exhibit a replica symmetry breaking (RSB) structure in the extremum conditions of its free energy, and what order of RSB is necessary. In this paper, we introduce a novel approximate message passing algorithm that incorporates K-step RSB (KRSB) and naturally reduces to VAMP and VASP with specific parameter selections. We refer to this as the K-step VASP (KVASP) algorithm. Simulations show that KVASP significantly outperforms VAMP and GASP in estimation accuracy, particularly when the assumed prior has discrete support and the measurement matrix is non-i.i.d.. Additionally, the state evolution (SE) of KVASP, derived heuristically, accurately tracks the per-iteration mean squared error (MSE). A comparison between the SE and the free energy under the KRSB ansatz reveals that the fixed-point equations of SE align with the saddle-point equations of the free energy. 
This suggests that, once the KRSB ansatz holds and the SE fixed point is reached, KVASP can accurately compute the PPE in the large system limit (LSL). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20902v1-abstract-full').style.display = 'none'; document.getElementById('2410.20902v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2311.05111</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.19795">arXiv:2410.19795</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.19795">pdf</a>, <a href="https://arxiv.org/format/2410.19795">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> SANSee: A Physical-layer Semantic-aware Networking Framework for Distributed Wireless Sensing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Huixiang Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+Y">Yong Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yingyu Li</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+G">Guangming Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Krunz%2C+M">Marwan Krunz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.19795v1-abstract-short" style="display: inline;"> Contactless device-free wireless sensing has recently attracted significant interest due to its potential to support a wide range of immersive human-machine interactive applications using ubiquitously available radio frequency (RF) signals. Traditional approaches focus on developing a single global model based on a combined dataset collected from different locations. However, wireless signals are&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.19795v1-abstract-full').style.display = 'inline'; document.getElementById('2410.19795v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.19795v1-abstract-full" style="display: none;"> Contactless device-free wireless sensing has recently attracted significant interest due to its potential to support a wide range of immersive human-machine interactive applications using ubiquitously available radio frequency (RF) signals. Traditional approaches focus on developing a single global model based on a combined dataset collected from different locations. However, wireless signals are known to be location and environment specific. Thus, a global model results in inconsistent and unreliable sensing results. 
It is also unrealistic to construct individual models for all the possible locations and environmental scenarios. Motivated by the observation that signals recorded at different locations are closely related to a set of physical-layer semantic features, in this paper we propose SANSee, a semantic-aware networking-based framework for distributed wireless sensing. SANSee allows models constructed in one or a limited number of locations to be transferred to new locations without requiring any locally labeled data or model training. SANSee is built on the concept of physical-layer semantic-aware network (pSAN), which characterizes the semantic similarity and the correlations of sensed data across different locations. A pSAN-based zero-shot transfer learning solution is introduced to allow receivers in new locations to obtain location-specific models by directly aggregating the models trained by other receivers. We theoretically prove that models obtained by SANSee can approach the locally optimal models. Experimental results based on real-world datasets are used to verify that the accuracy of the transferred models obtained by SANSee matches that of the models trained by the locally labeled data based on supervised learning approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.19795v1-abstract-full').style.display = 'none'; document.getElementById('2410.19795v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted at IEEE Transactions on Mobile Computing</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.19694">arXiv:2410.19694</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.19694">pdf</a>, <a href="https://arxiv.org/format/2410.19694">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Less is More: Extreme Gradient Boost Rank-1 Adaption for Efficient Finetuning of LLMs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yifei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hao Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+A">Aiwei Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+H">Han Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Koniusz%2C+P">Piotr Koniusz</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+I">Irwin King</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.19694v1-abstract-short" style="display: inline;"> Fine-tuning Large Language Models (LLMs) has become a crucial technique for adapting pre-trained 
models to downstream tasks. However, the enormous size of LLMs poses significant challenges in terms of computational complexity and resource requirements. Low-Rank Adaptation (LoRA) has emerged as a promising solution. However, there exists a gap between the practical performance of low-rank adaptatio&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.19694v1-abstract-full').style.display = 'inline'; document.getElementById('2410.19694v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.19694v1-abstract-full" style="display: none;"> Fine-tuning Large Language Models (LLMs) has become a crucial technique for adapting pre-trained models to downstream tasks. However, the enormous size of LLMs poses significant challenges in terms of computational complexity and resource requirements. Low-Rank Adaptation (LoRA) has emerged as a promising solution. However, there exists a gap between the practical performance of low-rank adaptations and its theoretical optimum. In this work, we propose eXtreme Gradient Boosting LoRA (XGBLoRA), a novel framework that bridges this gap by leveraging the power of ensemble learning. Inspired by gradient boosting, XGBLoRA iteratively learns and merges a sequence of LoRA adaptations to refine model predictions. It achieves better performance than the standard LoRA, while enjoying the computational efficiency of rank-1 adaptations. We provide theoretical analysis to show the convergence and optimality of our approach, and conduct extensive experiments on a range of natural language processing tasks. The results demonstrate that XGBLoRA consistently outperforms standard LoRA and achieves performance comparable to full fine-tuning with significantly fewer trainable parameters. This work advances parameter-efficient fine-tuning for LLMs, and offers a promising solution for adapting LLMs to downstream tasks while optimizing performance and efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.19694v1-abstract-full').style.display = 'none'; document.getElementById('2410.19694v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
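<p class="is-size-7">The gradient-boosting analogy in the XGBLoRA abstract above (iteratively learning and merging a sequence of low-rank adaptations) can be sketched on a linear toy problem. The NumPy code below is an illustrative sketch under that reading, not the XGBLoRA training procedure: each boosting round fits a rank-1 adapter to the current residual and merges it into the frozen weights.</p>
<pre><code class="language-python">
# Hedged toy sketch of boosting-style rank-1 adaptation on a single linear layer.
import numpy as np

rng = np.random.default_rng(0)
d_in, d_out, n = 16, 8, 256
X = rng.normal(size=(n, d_in))
W_true = rng.normal(size=(d_in, d_out))     # downstream task the model should match
W = rng.normal(size=(d_in, d_out))          # "pretrained" weights, frozen within a round
Y = X @ W_true

def fit_rank1(X, R, steps=200, lr=1e-2):
    """Fit a rank-1 adapter A (d_in,1) @ B (1,d_out) to the residual R by gradient descent."""
    A = rng.normal(scale=0.01, size=(X.shape[1], 1))
    B = rng.normal(scale=0.01, size=(1, R.shape[1]))
    for _ in range(steps):
        E = X @ A @ B - R                   # error of the adapter alone on the residual
        gA = X.T @ E @ B.T / len(X)
        gB = A.T @ X.T @ E / len(X)
        A -= lr * gA
        B -= lr * gB
    return A, B

# Boosting loop: learn a rank-1 LoRA on the residual, merge it, repeat.
for rnd in range(20):
    R = Y - X @ W                           # residual of the current merged model
    A, B = fit_rank1(X, R)
    W = W + A @ B                           # merge the adapter into the weights
    print(rnd, float(np.mean((Y - X @ W) ** 2)))   # training error shrinks per round
</code></pre>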
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.18135">arXiv:2410.18135</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.18135">pdf</a>, <a href="https://arxiv.org/format/2410.18135">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> R2Gen-Mamba: A Selective State Space Model for Radiology Report Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Y">Yongheng Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+Y+Z">Yueh Z. Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Woodard%2C+G+A">Genevieve A. Woodard</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongtu Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Lian%2C+C">Chunfeng Lian</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+M">Mingxia Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.18135v1-abstract-short" style="display: inline;"> Radiology report generation is crucial in medical imaging,but the manual annotation process by physicians is time-consuming and labor-intensive, necessitating the develop-ment of automatic report generation methods. Existingresearch predominantly utilizes Transformers to generateradiology reports, which can be computationally intensive,limiting their use in real applications. In this work, we pres&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18135v1-abstract-full').style.display = 'inline'; document.getElementById('2410.18135v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.18135v1-abstract-full" style="display: none;"> Radiology report generation is crucial in medical imaging,but the manual annotation process by physicians is time-consuming and labor-intensive, necessitating the develop-ment of automatic report generation methods. Existingresearch predominantly utilizes Transformers to generateradiology reports, which can be computationally intensive,limiting their use in real applications. In this work, we presentR2Gen-Mamba, a novel automatic radiology report genera-tion method that leverages the efficient sequence processingof the Mamba with the contextual benefits of Transformerarchitectures. 
Due to the lower computational complexity of Mamba, R2Gen-Mamba not only enhances training and inference efficiency but also produces high-quality reports. Experimental results on two benchmark datasets with more than 210,000 X-ray image-report pairs demonstrate the effectiveness of R2Gen-Mamba regarding report quality and computational efficiency compared with several state-of-the-art methods. The source code can be accessed online. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18135v1-abstract-full').style.display = 'none'; document.getElementById('2410.18135v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, for ISBI 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.17484">arXiv:2410.17484</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.17484">pdf</a>, <a href="https://arxiv.org/format/2410.17484">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Which Client is Reliable?: A Reliable and Personalized Prompt-based Federated Learning for Medical Image Question Answering </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">He Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Togo%2C+R">Ren Togo</a>, <a href="/search/cs?searchtype=author&amp;query=Ogawa%2C+T">Takahiro Ogawa</a>, <a href="/search/cs?searchtype=author&amp;query=Haseyama%2C+M">Miki Haseyama</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.17484v1-abstract-short" style="display: inline;"> Conventional medical artificial intelligence (AI) models face barriers in clinical application and ethical issues owing to their inability to handle the privacy-sensitive characteristics of medical data. We present a novel personalized federated learning (pFL) method for medical visual question answering (VQA) models, addressing privacy reliability challenges in the medical domain. Our method intr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.17484v1-abstract-full').style.display = 'inline'; document.getElementById('2410.17484v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.17484v1-abstract-full" style="display: none;"> Conventional medical artificial intelligence (AI) models face barriers in clinical application and ethical issues owing to their inability to handle the privacy-sensitive characteristics of medical data.
arXiv:2410.17484 [pdf, other]
Subjects: cs.CV; cs.CL; cs.LG
Title: Which Client is Reliable?: A Reliable and Personalized Prompt-based Federated Learning for Medical Image Question Answering
Authors: He Zhu, Ren Togo, Takahiro Ogawa, Miki Haseyama
Abstract: Conventional medical artificial intelligence (AI) models face barriers to clinical application and raise ethical issues owing to their inability to handle the privacy-sensitive characteristics of medical data. We present a novel personalized federated learning (pFL) method for medical visual question answering (VQA) models, addressing privacy reliability challenges in the medical domain. Our method introduces learnable prompts into a Transformer architecture to efficiently train it on diverse medical datasets without massive computational costs. We then introduce a reliable client VQA model that incorporates Dempster-Shafer evidence theory to quantify uncertainty in predictions, enhancing the model's reliability. Furthermore, we propose a novel inter-client communication mechanism that uses maximum likelihood estimation to balance accuracy and uncertainty, fostering efficient integration of insights across clients.
Submitted 22 October, 2024; originally announced October 2024.
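A minimal sketch of the uncertainty-quantification idea mentioned in this abstract: non-negative evidence is mapped to Dirichlet parameters, per-class belief masses, and an overall uncertainty mass, in the usual Dempster-Shafer / evidential-classification style. The softplus evidence function and the three-class toy example are illustrative assumptions, not the paper's exact formulation.

```python
import torch
import torch.nn.functional as F

def evidential_uncertainty(logits: torch.Tensor):
    """Map classifier logits to subjective-logic belief masses and uncertainty.

    Non-negative evidence -> Dirichlet parameters -> per-class belief plus an
    overall uncertainty mass. The softplus evidence function is an assumption
    for illustration; the paper's exact construction may differ.
    """
    evidence = F.softplus(logits)               # e_k >= 0
    alpha = evidence + 1.0                      # Dirichlet parameters
    strength = alpha.sum(dim=-1, keepdim=True)  # S = sum_k alpha_k
    belief = evidence / strength                # b_k = e_k / S
    uncertainty = logits.size(-1) / strength    # u = K / S, so sum_k b_k + u = 1
    prob = alpha / strength                     # expected class probabilities
    return belief, uncertainty, prob

# Example: a confident vs. an ambiguous answer distribution
logits = torch.tensor([[8.0, 0.1, 0.1], [0.5, 0.4, 0.45]])
b, u, p = evidential_uncertainty(logits)
print(u)  # the second sample receives a much larger uncertainty mass
```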
arXiv:2410.16942 [pdf, other]
Subjects: cs.CV
Title: DiP-GO: A Diffusion Pruner via Few-step Gradient Optimization
Authors: Haowei Zhu, Dehua Tang, Ji Liu, Mingjie Lu, Jintu Zheng, Jinzhang Peng, Dong Li, Yu Wang, Fan Jiang, Lu Tian, Spandan Tiwari, Ashish Sirasao, Jun-Hai Yong, Bin Wang, Emad Barsoum
Abstract: Diffusion models have achieved remarkable progress in the field of image generation due to their outstanding capabilities. However, these models require substantial computing resources because of the multi-step denoising process during inference. While traditional pruning methods have been employed to optimize these models, the retraining process requires large-scale training datasets and extensive computational cost to maintain generalization ability, making it neither convenient nor efficient. Recent studies attempt to exploit the similarity of features across adjacent denoising stages to reduce computational cost through simple, static strategies. However, such strategies cannot fully harness the potential of the similar feature patterns across adjacent timesteps. In this work, we propose a novel pruning method that derives an efficient diffusion model via a more intelligent and differentiable pruner. At the core of our approach is casting the model pruning process as a SubNet search process. Specifically, we first introduce a SuperNet based on the standard diffusion model by adding backup connections built upon the similar features. We then construct a plugin pruner network and design optimization losses to identify redundant computation. Finally, our method identifies an optimal SubNet through few-step gradient optimization and a simple post-processing procedure. We conduct extensive experiments on various diffusion models, including the Stable Diffusion series and DiTs. Our DiP-GO approach achieves a 4.4x speedup for SD-1.5 without any loss of accuracy, significantly outperforming previous state-of-the-art methods.
Submitted 22 October, 2024; originally announced October 2024.
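The differentiable-pruner idea above can be illustrated with learnable gates: each block gets a soft gate trained jointly with a sparsity penalty, and the gates are thresholded afterwards to obtain a SubNet. This is a generic sketch under those assumptions, not DiP-GO's actual SuperNet or pruner network.

```python
import torch
import torch.nn as nn

class GatedBlock(nn.Module):
    """Wrap a sub-module with a learnable soft gate in [0, 1].

    The gate decides whether the block's computation is kept or skipped
    (falling back to the identity path). After a few gradient steps the gates
    can be thresholded to yield a pruned SubNet.
    """
    def __init__(self, block: nn.Module):
        super().__init__()
        self.block = block
        self.gate_logit = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        g = torch.sigmoid(self.gate_logit)
        return g * self.block(x) + (1.0 - g) * x   # skip path when gate -> 0

def sparsity_loss(model: nn.Module, weight: float = 1e-2) -> torch.Tensor:
    gates = torch.stack([torch.sigmoid(m.gate_logit)
                         for m in model.modules() if isinstance(m, GatedBlock)])
    return weight * gates.mean()   # encourage gates (and thus compute) to shrink

# Toy usage: two gated blocks optimized for a few gradient steps.
net = nn.Sequential(GatedBlock(nn.Linear(8, 8)), GatedBlock(nn.Linear(8, 8)))
opt = torch.optim.Adam(net.parameters(), lr=1e-2)
x, target = torch.randn(4, 8), torch.randn(4, 8)
for _ in range(10):
    loss = nn.functional.mse_loss(net(x), target) + sparsity_loss(net)
    opt.zero_grad()
    loss.backward()
    opt.step()
```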
arXiv:2410.16487 [pdf, other]
Subjects: cs.DC
Title: Adventures with Grace Hopper AI Super Chip and the National Research Platform
Authors: J. Alex Hurt, Grant J. Scott, Derek Weitzel, Huijun Zhu
Abstract: The National Science Foundation (NSF) funded National Research Platform (NRP) is a hyper-converged cluster of nationally and globally interconnected heterogeneous computing resources. The dominant computing environment of the NRP is the x86_64 instruction set architecture (ISA), often with graphics processing units (GPUs). Researchers across the nation leverage containers and Kubernetes to execute high-throughput computing (HTC) workloads across the heterogeneous cyberinfrastructure with minimal friction and maximum flexibility. As part of the NSF-funded GP-ENGINE project, we stood up the first server with an NVIDIA Grace Hopper AI Chip (GH200), an alternative ARM ISA, for the NRP. This presents challenges, as containers must be specifically built for ARM versus x86_64. Herein, we describe the challenges encountered, as well as our resulting solutions and some relevant performance benchmarks. We specifically compare the GH200 to the A100 for computer vision workloads within compute nodes in the NRP.
Submitted 21 October, 2024; originally announced October 2024.

arXiv:2410.15312 [pdf, other]
Subjects: cs.CV; cs.AI
Title: Synergistic Dual Spatial-aware Generation of Image-to-Text and Text-to-Image
Authors: Yu Zhao, Hao Fei, Xiangtai Li, Libo Qin, Jiayi Ji, Hongyuan Zhu, Meishan Zhang, Min Zhang, Jianguo Wei
Abstract: In the visual spatial understanding (VSU) area, spatial image-to-text (SI2T) and spatial text-to-image (ST2I) are two fundamental tasks that appear in dual form. Existing methods for standalone SI2T or ST2I perform imperfectly in spatial understanding, due to the difficulty of 3D-wise spatial feature modeling. In this work, we consider modeling SI2T and ST2I together under a dual learning framework. Within this dual framework, we propose to represent the 3D spatial scene features with a novel 3D scene graph (3DSG) representation that can be shared by and is beneficial to both tasks. Further, inspired by the intuition that the easier 3D$\to$image and 3D$\to$text processes exist symmetrically in ST2I and SI2T, respectively, we propose the Spatial Dual Discrete Diffusion (SD$^3$) framework, which utilizes the intermediate features of the 3D$\to$X processes to guide the hard X$\to$3D processes, such that the overall ST2I and SI2T benefit each other. On the visual spatial understanding dataset VSD, our system outperforms the mainstream T2I and I2T methods significantly. Further in-depth analysis reveals how our dual learning strategy contributes to these gains.
Submitted 20 October, 2024; originally announced October 2024.

arXiv:2410.13187 [pdf, other]
Subjects: cs.CL; cs.AI; cs.SE
Title: aiXcoder-7B: A Lightweight and Effective Large Language Model for Code Completion
Authors: Siyuan Jiang, Jia Li, He Zong, Huanyu Liu, Hao Zhu, Shukai Hu, Erlu Li, Jiazheng Ding, Yu Han, Wei Ning, Gen Wang, Yihong Dong, Kechi Zhang, Ge Li
Abstract: Large Language Models (LLMs) have been widely used in code completion, and researchers are focusing on scaling up LLMs to improve their accuracy. However, larger LLMs increase the response time of code completion and decrease developers' productivity. In this paper, we propose a lightweight and effective LLM for code completion named aiXcoder-7B. Compared to existing LLMs, aiXcoder-7B achieves higher code completion accuracy while having a smaller scale (i.e., 7 billion parameters). We attribute the superiority of aiXcoder-7B to three key factors: (1) Multi-objective training. We employ three training objectives, one of which is our proposed Structured Fill-In-the-Middle (SFIM). SFIM considers the syntax structures in code and effectively improves the performance of LLMs for code. (2) Diverse data sampling strategies. They consider inter-file relationships and enhance the capability of LLMs in understanding cross-file contexts. (3) Extensive high-quality data. We establish a rigorous data collection pipeline and consume a total of 1.2 trillion unique tokens for training aiXcoder-7B. This vast volume of data enables aiXcoder-7B to learn a broad distribution of code. We evaluate aiXcoder-7B on five popular code completion benchmarks and a new benchmark collected for this paper. The results show that aiXcoder-7B outperforms the six latest LLMs of similar size and even surpasses four larger LLMs (e.g., StarCoder2-15B and CodeLlama-34B), positioning aiXcoder-7B as a lightweight and effective LLM for academia and industry. Finally, we summarize three valuable insights to help practitioners train the next generations of LLMs for code. aiXcoder-7B has been open-sourced and has gained significant attention. As of the submission date, aiXcoder-7B has received 2,193 GitHub Stars.
Submitted 30 October, 2024; v1 submitted 16 October, 2024; originally announced October 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">aiXcoder-7B is available at https://github.com/aixcoder-plugin/aiXcoder-7B</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.11076">arXiv:2410.11076</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.11076">pdf</a>, <a href="https://arxiv.org/format/2410.11076">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> PRACTIQ: A Practical Conversational Text-to-SQL dataset with Ambiguous and Unanswerable Queries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dong%2C+M">Mingwen Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Kumar%2C+N+A">Nischal Ashok Kumar</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Y">Yiqun Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Chauhan%2C+A">Anuj Chauhan</a>, <a href="/search/cs?searchtype=author&amp;query=Hang%2C+C">Chung-Wei Hang</a>, <a href="/search/cs?searchtype=author&amp;query=Chang%2C+S">Shuaichen Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+L">Lin Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Lan%2C+W">Wuwei Lan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Henghui Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+J">Jiarong Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Ng%2C+P">Patrick Ng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhiguo Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.11076v1-abstract-short" style="display: inline;"> Previous text-to-SQL datasets and systems have primarily focused on user questions with clear intentions that can be answered. However, real user questions can often be ambiguous with multiple interpretations or unanswerable due to a lack of relevant data. In this work, we construct a practical conversational text-to-SQL dataset called PRACTIQ, consisting of ambiguous and unanswerable questions in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11076v1-abstract-full').style.display = 'inline'; document.getElementById('2410.11076v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.11076v1-abstract-full" style="display: none;"> Previous text-to-SQL datasets and systems have primarily focused on user questions with clear intentions that can be answered. However, real user questions can often be ambiguous with multiple interpretations or unanswerable due to a lack of relevant data. In this work, we construct a practical conversational text-to-SQL dataset called PRACTIQ, consisting of ambiguous and unanswerable questions inspired by real-world user questions. We first identified four categories of ambiguous questions and four categories of unanswerable questions by studying existing text-to-SQL datasets. 
arXiv:2410.11076 [pdf, other]
Subjects: cs.CL; cs.AI
Title: PRACTIQ: A Practical Conversational Text-to-SQL dataset with Ambiguous and Unanswerable Queries
Authors: Mingwen Dong, Nischal Ashok Kumar, Yiqun Hu, Anuj Chauhan, Chung-Wei Hang, Shuaichen Chang, Lin Pan, Wuwei Lan, Henghui Zhu, Jiarong Jiang, Patrick Ng, Zhiguo Wang
Abstract: Previous text-to-SQL datasets and systems have primarily focused on user questions with clear intentions that can be answered. However, real user questions are often ambiguous, with multiple interpretations, or unanswerable due to a lack of relevant data. In this work, we construct a practical conversational text-to-SQL dataset called PRACTIQ, consisting of ambiguous and unanswerable questions inspired by real-world user questions. We first identified four categories of ambiguous questions and four categories of unanswerable questions by studying existing text-to-SQL datasets. Then, we generate conversations with four turns: the initial user question, an assistant response seeking clarification, the user's clarification, and the assistant's clarified SQL response with a natural language explanation of the execution results. For some ambiguous queries, we also directly generate helpful SQL responses that consider multiple aspects of the ambiguity, instead of requesting user clarification. To benchmark performance on ambiguous, unanswerable, and answerable questions, we implemented large language model (LLM)-based baselines using various LLMs. Our approach involves two steps: question category classification and clarification SQL prediction. Our experiments reveal that state-of-the-art systems struggle to handle ambiguous and unanswerable questions effectively. We will release our code for data generation and experiments on GitHub.
Submitted 14 October, 2024; originally announced October 2024.
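The two-step baseline described in this abstract (question category classification, then clarification or SQL prediction) can be sketched as follows. The category labels, prompts, and the `call_llm` stand-in are hypothetical placeholders, not the paper's implementation.

```python
from typing import Callable

# Hypothetical coarse labels; the paper defines four ambiguous and four
# unanswerable categories, whose exact names are not reproduced here.
CATEGORIES = ["answerable", "ambiguous", "unanswerable"]

def two_step_text_to_sql(question: str, schema: str,
                         call_llm: Callable[[str], str]) -> str:
    """Sketch of a two-step baseline: classify, then clarify or predict SQL.

    `call_llm` is a placeholder for any chat/completion API; the prompts below
    are illustrative only.
    """
    # Step 1: question category classification.
    cls_prompt = (f"Schema:\n{schema}\n\nQuestion: {question}\n"
                  f"Classify the question as one of {CATEGORIES}. Answer with one word.")
    category = call_llm(cls_prompt).strip().lower()

    # Step 2: clarification request or SQL prediction, depending on the category.
    if category == "ambiguous":
        return call_llm(f"The question '{question}' is ambiguous given the schema. "
                        "Ask one clarifying question.")
    if category == "unanswerable":
        return "This question cannot be answered with the available tables."
    return call_llm(f"Schema:\n{schema}\n\nWrite a SQL query answering: {question}")

# Example with a trivial stand-in "LLM" that always labels questions ambiguous.
print(two_step_text_to_sql("Show the top sellers", "orders(id, seller, amount)",
                           lambda prompt: "ambiguous" if "Classify" in prompt
                           else "Which time period do you mean?"))
```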
arXiv:2410.10873 [pdf, other]
Subjects: cs.CL; cs.AI; cs.CY
Title: AuditWen: An Open-Source Large Language Model for Audit
Authors: Jiajia Huang, Haoran Zhu, Chao Xu, Tianming Zhan, Qianqian Xie, Jimin Huang
Abstract: Intelligent auditing represents a crucial advancement in modern audit practices, enhancing both the quality and efficiency of audits within the realm of artificial intelligence. With the rise of large language models (LLMs), there is enormous potential for intelligent models to contribute to the audit domain. However, general LLMs applied in the audit domain face the challenges of lacking specialized knowledge and the presence of data biases. To overcome these challenges, this study introduces AuditWen, an open-source audit LLM obtained by fine-tuning Qwen on instruction data constructed from the audit domain. We first outline the application scenarios for LLMs in audit and extract the requirements that shape the development of LLMs tailored for audit purposes. We then build AuditWen by fine-tuning Qwen on a 28k-instruction dataset covering 15 audit tasks and 3 layers. For evaluation, we propose a benchmark of 3k instructions that covers a set of critical audit tasks derived from the application scenarios. Using this benchmark, we compare AuditWen with other existing LLMs on information extraction, question answering, and document generation. The experimental results demonstrate the superior performance of AuditWen in both question understanding and answer generation, making it an immediately valuable tool for audit.
Submitted 8 October, 2024; originally announced October 2024.
Comments: 18 pages, 1 figure

arXiv:2410.10093 [pdf, other]
Subjects: cs.CL; cs.LG
Title: How to Leverage Demonstration Data in Alignment for Large Language Model? A Self-Imitation Learning Perspective
Authors: Teng Xiao, Mingxiao Li, Yige Yuan, Huaisheng Zhu, Chao Cui, Vasant G Honavar
Abstract: This paper introduces a novel generalized self-imitation learning (GSIL) framework, which effectively and efficiently aligns large language models with offline demonstration data. We develop GSIL by deriving a surrogate objective of imitation learning with density ratio estimates, facilitating the use of self-generated data and optimizing the imitation learning objective with simple classification losses. GSIL eliminates the need for complex adversarial training in standard imitation learning, achieving lightweight and efficient fine-tuning for large language models. In addition, GSIL encompasses a family of offline losses parameterized by a general class of convex functions for density ratio estimation and enables a unified view of alignment with demonstration data. Extensive experiments show that GSIL consistently and significantly outperforms baselines on many challenging benchmarks, such as coding (HumanEval), mathematical reasoning (GSM8K), and instruction following (MT-Bench).
Submitted 13 October, 2024; originally announced October 2024.
Comments: EMNLP 2024 Main
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.09958v2-abstract-full').style.display = 'none'; document.getElementById('2410.09958v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 0 figures, 2 tables (main text), 2 tables (appendix)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68U35 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> J.4; J.3; H.5.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.09444">arXiv:2410.09444</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.09444">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Diabetic retinopathy image classification method based on GreenBen data augmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yutong Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+J">Jie Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Haijiang Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.09444v1-abstract-short" style="display: inline;"> For the diagnosis of diabetes retinopathy (DR) images, this paper proposes a classification method based on artificial intelligence. The core lies in a new data augmentation method, GreenBen, which first extracts the green channel grayscale image from the retinal image and then performs Ben enhancement. Considering that diabetes macular edema (DME) is a complication closely related to DR, this pap&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.09444v1-abstract-full').style.display = 'inline'; document.getElementById('2410.09444v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.09444v1-abstract-full" style="display: none;"> For the diagnosis of diabetes retinopathy (DR) images, this paper proposes a classification method based on artificial intelligence. The core lies in a new data augmentation method, GreenBen, which first extracts the green channel grayscale image from the retinal image and then performs Ben enhancement. 
Considering that diabetes macular edema (DME) is a complication closely related to DR, this paper constructs a joint classification framework of DR and DME based on multi task learning and attention module, and uses GreenBen to enhance its data to reduce the difference of DR images and improve the accuracy of model classification. We conducted extensive experiments on three publicly available datasets, and our method achieved the best results. For GreenBen, whether based on the ResNet50 network or the Swin Transformer network, whether for individual classification or joint DME classification, compared with other data augmentation methods, GreenBen achieved stable and significant improvements in DR classification results, with an accuracy increase of 10%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.09444v1-abstract-full').style.display = 'none'; document.getElementById('2410.09444v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.09103">arXiv:2410.09103</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.09103">pdf</a>, <a href="https://arxiv.org/format/2410.09103">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Parameter-Efficient Fine-Tuning via Selective Discrete Cosine Transform </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shen%2C+Y">Yixian Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Bi%2C+Q">Qi Bi</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+J">Jia-Hong Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongyi Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Pathania%2C+A">Anuj Pathania</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.09103v1-abstract-short" style="display: inline;"> In the era of large language models, parameter-efficient fine-tuning (PEFT) has been extensively studied. However, these approaches usually rely on the space domain, which encounters storage challenges especially when handling extensive adaptations or larger models. The frequency domain, in contrast, is more effective in compressing trainable parameters while maintaining the expressive capability.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.09103v1-abstract-full').style.display = 'inline'; document.getElementById('2410.09103v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.09103v1-abstract-full" style="display: none;"> In the era of large language models, parameter-efficient fine-tuning (PEFT) has been extensively studied. 
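The GreenBen augmentation described above is concrete enough to sketch: take the green channel of the fundus image, then apply Ben (Graham-style) enhancement. The blur sigma and weighting constants below are common defaults, not values reported in the paper.

```python
import cv2
import numpy as np

def greenben(bgr: np.ndarray, sigma: float = 10.0) -> np.ndarray:
    """GreenBen-style preprocessing: green channel first, then Ben enhancement.

    "Ben enhancement" here is the widely used Graham recipe (subtract a heavy
    Gaussian blur and re-center around mid-gray). The sigma and the 4/-4/128
    weights are common defaults, not values taken from the paper.
    """
    green = bgr[:, :, 1]                                  # green channel as grayscale
    blurred = cv2.GaussianBlur(green, (0, 0), sigma)
    return cv2.addWeighted(green, 4.0, blurred, -4.0, 128.0)

# Synthetic stand-in for a fundus photo; replace with cv2.imread("retina.jpg").
fundus = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
enhanced = greenben(fundus)
print(enhanced.shape, enhanced.dtype)                     # (256, 256) uint8
```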
arXiv:2410.09103 [pdf, other]
Subjects: cs.LG; cs.AI
Title: Parameter-Efficient Fine-Tuning via Selective Discrete Cosine Transform
Authors: Yixian Shen, Qi Bi, Jia-Hong Huang, Hongyi Zhu, Anuj Pathania
Abstract: In the era of large language models, parameter-efficient fine-tuning (PEFT) has been extensively studied. However, these approaches usually operate in the spatial domain, which encounters storage challenges, especially when handling extensive adaptations or larger models. The frequency domain, in contrast, is more effective at compressing trainable parameters while maintaining expressive capability. In this paper, we propose a novel Selective Discrete Cosine Transformation (sDCTFT) fine-tuning scheme to push this frontier. Its general idea is to exploit the superior energy compaction and decorrelation properties of the DCT to improve both model efficiency and accuracy. Specifically, it projects the weight change from the low-rank adaptation into the discrete cosine space. The weight change is then partitioned over different levels of the discrete cosine spectrum, and the most critical frequency components in each partition are selected. Extensive experiments on four benchmark datasets demonstrate the superior accuracy, reduced computational cost, and lower storage requirements of the proposed method over prior art. For instance, when performing instruction tuning on the LLaMA3.1-8B model, sDCTFT outperforms LoRA with just 0.05M trainable parameters compared to LoRA's 38.2M, and surpasses FourierFT with 30% fewer trainable parameters. The source code will be publicly available.
Submitted 9 October, 2024; originally announced October 2024.
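The selection step described above can be sketched as follows: transform a weight update with a 2D DCT, split the spectrum into frequency bands, and keep the largest-magnitude coefficients in each band. The band definition and per-band budgets are simplifications, not sDCTFT's actual selection rule.

```python
import numpy as np
from scipy.fft import dctn, idctn

def sparsify_in_dct_space(delta_w: np.ndarray, keep_per_band: int = 8,
                          n_bands: int = 4) -> np.ndarray:
    """Keep only the strongest DCT coefficients of a weight update, band by band.

    Projects a (low-rank) weight change into the discrete cosine space,
    partitions the spectrum into diagonal frequency bands, and retains the
    largest-magnitude components in each band. This is an illustrative recipe,
    not the paper's exact partitioning or selection criterion.
    """
    spec = dctn(delta_w, norm="ortho")
    rows, cols = np.indices(spec.shape)
    band = ((rows + cols) * n_bands // (spec.shape[0] + spec.shape[1])).clip(0, n_bands - 1)
    mask = np.zeros(spec.size, dtype=bool)
    flat_spec, flat_band = spec.ravel(), band.ravel()
    for b in range(n_bands):
        idx = np.flatnonzero(flat_band == b)                       # positions in band b
        keep = idx[np.argsort(np.abs(flat_spec[idx]))[-keep_per_band:]]
        mask[keep] = True
    sparse_spec = np.where(mask.reshape(spec.shape), spec, 0.0)
    return idctn(sparse_spec, norm="ortho")

delta_w = 0.01 * np.random.randn(16, 16)       # stand-in for a LoRA-style update
approx = sparsify_in_dct_space(delta_w)
print("retained energy fraction:", np.linalg.norm(approx) / np.linalg.norm(delta_w))
```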
arXiv:2410.08208 [pdf, other]
Subjects: cs.CV; cs.AI; cs.LG; cs.RO
Title: SPA: 3D Spatial-Awareness Enables Effective Embodied Representation
Authors: Haoyi Zhu, Honghui Yang, Yating Wang, Jiange Yang, Limin Wang, Tong He
Abstract: In this paper, we introduce SPA, a novel representation learning framework that emphasizes the importance of 3D spatial awareness in embodied AI. Our approach leverages differentiable neural rendering on multi-view images to endow a vanilla Vision Transformer (ViT) with intrinsic spatial understanding. We present the most comprehensive evaluation of embodied representation learning to date, covering 268 tasks across 8 simulators with diverse policies in both single-task and language-conditioned multi-task scenarios. The results are compelling: SPA consistently outperforms more than 10 state-of-the-art representation methods, including those specifically designed for embodied AI, vision-centric tasks, and multi-modal applications, while using less training data. Furthermore, we conduct a series of real-world experiments to confirm its effectiveness in practical scenarios. These results highlight the critical role of 3D spatial awareness for embodied representation learning. Our strongest model takes more than 6000 GPU hours to train, and we are committed to open-sourcing all code and model weights to foster future research in embodied representation learning. Project Page: https://haoyizhu.github.io/spa/.
Submitted 11 October, 2024; v1 submitted 10 October, 2024; originally announced October 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project Page: https://haoyizhu.github.io/spa/</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhu%2C+H&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div 
class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>
