CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 704 results for author: <span class="mathjax">Xu, B</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Xu%2C+B">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Xu, B"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Xu%2C+B&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Xu, B"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16061">arXiv:2411.16061</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16061">pdf</a>, <a href="https://arxiv.org/format/2411.16061">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Scaling Spike-driven Transformer with Efficient Spike Firing Approximation Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yao%2C+M">Man Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Qiu%2C+X">Xuerui Qiu</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+T">Tianxiang Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+J">Jiakui Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Chou%2C+Y">Yuhong Chou</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+K">Keyu Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Liao%2C+J">Jianxing Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Leng%2C+L">Luziwei Leng</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+G">Guoqi Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16061v1-abstract-short" style="display: inline;"> The ambition of brain-inspired Spiking Neural Networks (SNNs) is to become a low-power alternative to traditional Artificial Neural Networks (ANNs). This work addresses two major challenges in realizing this vision: the performance gap between SNNs and ANNs, and the high training costs of SNNs. We identify intrinsic flaws in spiking neurons caused by binary firing mechanisms and propose a Spike Fi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16061v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16061v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16061v1-abstract-full" style="display: none;"> The ambition of brain-inspired Spiking Neural Networks (SNNs) is to become a low-power alternative to traditional Artificial Neural Networks (ANNs). This work addresses two major challenges in realizing this vision: the performance gap between SNNs and ANNs, and the high training costs of SNNs. We identify intrinsic flaws in spiking neurons caused by binary firing mechanisms and propose a Spike Firing Approximation (SFA) method using integer training and spike-driven inference. This optimizes the spike firing pattern of spiking neurons, enhancing efficient training, reducing power consumption, improving performance, enabling easier scaling, and better utilizing neuromorphic chips. We also develop an efficient spike-driven Transformer architecture and a spike-masked autoencoder to prevent performance degradation during SNN scaling. On ImageNet-1k, we achieve state-of-the-art top-1 accuracy of 78.5\%, 79.8\%, 84.0\%, and 86.2\% with models containing 10M, 19M, 83M, and 173M parameters, respectively. For instance, the 10M model outperforms the best existing SNN by 7.2\% on ImageNet, with training time acceleration and inference energy efficiency improved by 4.5$\times$ and 3.9$\times$, respectively. We validate the effectiveness and efficiency of the proposed method across various tasks, including object detection, semantic segmentation, and neuromorphic vision tasks. This work enables SNNs to match ANN performance while maintaining the low-power advantage, marking a significant step towards SNNs as a general visual backbone. Code is available at https://github.com/BICLab/Spike-Driven-Transformer-V3. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16061v1-abstract-full').style.display = 'none'; document.getElementById('2411.16061v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15446">arXiv:2411.15446</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15446">pdf</a>, <a href="https://arxiv.org/format/2411.15446">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> freePruner: A Training-free Approach for Large Multimodal Model Acceleration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bingxin Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Shang%2C+Y">Yuzhang Shang</a>, <a href="/search/cs?searchtype=author&amp;query=Ge%2C+Y">Yunhao Ge</a>, <a href="/search/cs?searchtype=author&amp;query=Lou%2C+Q">Qian Lou</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+Y">Yan Yan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15446v1-abstract-short" style="display: inline;"> Large Multimodal Models (LMMs) have demonstrated impressive capabilities in visual-language tasks but face significant deployment challenges due to their high computational demands. While recent token reduction methods show promise for accelerating LMMs, they typically require extensive retraining or fine-tuning, making them impractical for many state-of-the-art models, especially those with propr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15446v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15446v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15446v1-abstract-full" style="display: none;"> Large Multimodal Models (LMMs) have demonstrated impressive capabilities in visual-language tasks but face significant deployment challenges due to their high computational demands. While recent token reduction methods show promise for accelerating LMMs, they typically require extensive retraining or fine-tuning, making them impractical for many state-of-the-art models, especially those with proprietary training data. We propose freePruner, a training-free token reduction approach that can be directly applied to any open-source LMM without additional training. Unlike existing methods that rely heavily on token merging operations, freePruner employs a two-stage token selection strategy: (1) identifying pivotal tokens that capture high-level semantic information using our designed contribution degree metric, and (2) selecting complementary tokens that preserve essential low-level visual details through attention pattern analysis. Extensive experiments demonstrate that freePruner achieves 2x acceleration while maintaining comparable performance across mainstream visual question-answering benchmarks in the training-free setting. Moreover, freePruner is orthogonal to and can be combined with other post-training acceleration techniques, such as post-training quantization, providing a practical solution for efficient LMM deployment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15446v1-abstract-full').style.display = 'none'; document.getElementById('2411.15446v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14717">arXiv:2411.14717</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14717">pdf</a>, <a href="https://arxiv.org/format/2411.14717">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> FedMLLM: Federated Fine-tuning MLLM on Multimodal Heterogeneity Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Binqian Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Shu%2C+X">Xiangbo Shu</a>, <a href="/search/cs?searchtype=author&amp;query=Mei%2C+H">Haiyang Mei</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+G">Guosen Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Fernando%2C+B">Basura Fernando</a>, <a href="/search/cs?searchtype=author&amp;query=Shou%2C+M+Z">Mike Zheng Shou</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+J">Jinhui Tang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14717v1-abstract-short" style="display: inline;"> Multimodal Large Language Models (MLLMs) have made significant advancements, demonstrating powerful capabilities in processing and understanding multimodal data. Fine-tuning MLLMs with Federated Learning (FL) allows for expanding the training data scope by including private data sources, thereby enhancing their practical applicability in privacy-sensitive domains. However, current research remains&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14717v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14717v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14717v1-abstract-full" style="display: none;"> Multimodal Large Language Models (MLLMs) have made significant advancements, demonstrating powerful capabilities in processing and understanding multimodal data. Fine-tuning MLLMs with Federated Learning (FL) allows for expanding the training data scope by including private data sources, thereby enhancing their practical applicability in privacy-sensitive domains. However, current research remains in the early stage, particularly in addressing the \textbf{multimodal heterogeneities} in real-world applications. In this paper, we introduce a benchmark for evaluating various downstream tasks in the federated fine-tuning of MLLMs within multimodal heterogeneous scenarios, laying the groundwork for the research in the field. Our benchmark encompasses two datasets, five comparison baselines, and four multimodal scenarios, incorporating over ten types of modal heterogeneities. To address the challenges posed by modal heterogeneity, we develop a general FedMLLM framework that integrates four representative FL methods alongside two modality-agnostic strategies. Extensive experimental results show that our proposed FL paradigm improves the performance of MLLMs by broadening the range of training data and mitigating multimodal heterogeneity. Code is available at https://github.com/1xbq1/FedMLLM <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14717v1-abstract-full').style.display = 'none'; document.getElementById('2411.14717v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13343">arXiv:2411.13343</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13343">pdf</a>, <a href="https://arxiv.org/format/2411.13343">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Fact-Level Confidence Calibration and Self-Correction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+Y">Yige Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bingbing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+H">Hexiang Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+F">Fei Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+T">Teng Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+W">Wei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+H">Huawei Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+X">Xueqi Cheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13343v1-abstract-short" style="display: inline;"> Confidence calibration in LLMs, i.e., aligning their self-assessed confidence with the actual accuracy of their responses, enabling them to self-evaluate the correctness of their outputs. However, current calibration methods for LLMs typically estimate two scalars to represent overall response confidence and correctness, which is inadequate for long-form generation where the response includes mult&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13343v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13343v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13343v1-abstract-full" style="display: none;"> Confidence calibration in LLMs, i.e., aligning their self-assessed confidence with the actual accuracy of their responses, enabling them to self-evaluate the correctness of their outputs. However, current calibration methods for LLMs typically estimate two scalars to represent overall response confidence and correctness, which is inadequate for long-form generation where the response includes multiple atomic facts and may be partially confident and correct. These methods also overlook the relevance of each fact to the query. To address these challenges, we propose a Fact-Level Calibration framework that operates at a finer granularity, calibrating confidence to relevance-weighted correctness at the fact level. Furthermore, comprehensive analysis under the framework inspired the development of Confidence-Guided Fact-level Self-Correction ($\textbf{ConFix}$), which uses high-confidence facts within a response as additional knowledge to improve low-confidence ones. Extensive experiments across four datasets and six models demonstrate that ConFix effectively mitigates hallucinations without requiring external knowledge sources such as retrieval systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13343v1-abstract-full').style.display = 'none'; document.getElementById('2411.13343v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Code is available at https://github.com/yuanyige/fact-calibration</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12776">arXiv:2411.12776</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12776">pdf</a>, <a href="https://arxiv.org/format/2411.12776">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> Cross-Layer Encrypted Semantic Communication Framework for Panoramic Video Transmission </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gao%2C+H">Haixiao Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+M">Mengying Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+X">Xiaodong Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bingxuan Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+S">Shujun Han</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+B">Bizhu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+S">Sheng Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+C">Chen Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+P">Ping Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12776v1-abstract-short" style="display: inline;"> In this paper, we propose a cross-layer encrypted semantic communication (CLESC) framework for panoramic video transmission, incorporating feature extraction, encoding, encryption, cyclic redundancy check (CRC), and retransmission processes to achieve compatibility between semantic communication and traditional communication systems. Additionally, we propose an adaptive cross-layer transmission me&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12776v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12776v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12776v1-abstract-full" style="display: none;"> In this paper, we propose a cross-layer encrypted semantic communication (CLESC) framework for panoramic video transmission, incorporating feature extraction, encoding, encryption, cyclic redundancy check (CRC), and retransmission processes to achieve compatibility between semantic communication and traditional communication systems. Additionally, we propose an adaptive cross-layer transmission mechanism that dynamically adjusts CRC, channel coding, and retransmission schemes based on the importance of semantic information. This ensures that important information is prioritized under poor transmission conditions. To verify the aforementioned framework, we also design an end-to-end adaptive panoramic video semantic transmission (APVST) network that leverages a deep joint source-channel coding (Deep JSCC) structure and attention mechanism, integrated with a latitude adaptive module that facilitates adaptive semantic feature extraction and variable-length encoding of panoramic videos. The proposed CLESC is also applicable to the transmission of other modal data. Simulation results demonstrate that the proposed CLESC effectively achieves compatibility and adaptation between semantic communication and traditional communication systems, improving both transmission efficiency and channel adaptability. Compared to traditional cross-layer transmission schemes, the CLESC framework can reduce bandwidth consumption by 85% while showing significant advantages under low signal-to-noise ratio (SNR) conditions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12776v1-abstract-full').style.display = 'none'; document.getElementById('2411.12776v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11812">arXiv:2411.11812</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11812">pdf</a>, <a href="https://arxiv.org/format/2411.11812">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> cHyRRT and cHySST: Two Motion Planning Tools for Hybrid Dynamical Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Beverly Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+N">Nan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Sanfelice%2C+R">Ricardo Sanfelice</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11812v1-abstract-short" style="display: inline;"> This paper describes two C++/Open Motion Planning Library implementations of the recently developed motion planning algorithms HyRRT arXiv:2210.15082v1 [cs.RO] and HySST arXiv:2305.18649v1 [cs.RO]. Specifically, cHyRRT, an implementation of the HyRRT algorithm, is capable of generating a solution to a motion planning problem for hybrid systems with probabilistically completeness, while cHySST, an&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11812v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11812v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11812v1-abstract-full" style="display: none;"> This paper describes two C++/Open Motion Planning Library implementations of the recently developed motion planning algorithms HyRRT arXiv:2210.15082v1 [cs.RO] and HySST arXiv:2305.18649v1 [cs.RO]. Specifically, cHyRRT, an implementation of the HyRRT algorithm, is capable of generating a solution to a motion planning problem for hybrid systems with probabilistically completeness, while cHySST, an implementation of the asymptotically near-optimal HySST algorithm, is capable of computing a trajectory to solve the optimal motion planning problem for hybrid systems. cHyRRT is suitable for motion planning problems where an optimal solution is not required, whereas cHySST is suitable for such problems that prefer optimal solutions, within all feasible solutions. The structure, components, and usage of the two tools are described. Examples are included to illustrate the main capabilities of the toolbox. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11812v1-abstract-full').style.display = 'none'; document.getElementById('2411.11812v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper has 26 pages and has been submitted to 28th ACM International Conference on Hybrid Systems: Computation and Control</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.9 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11305">arXiv:2411.11305</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11305">pdf</a>, <a href="https://arxiv.org/ps/2411.11305">ps</a>, <a href="https://arxiv.org/format/2411.11305">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> TP-UNet: Temporal Prompt Guided UNet for Medical Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Ranmin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhuang%2C+L">Limin Zhuang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Hongkun Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Boyan Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Cai%2C+R">Ruichu Cai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11305v2-abstract-short" style="display: inline;"> The advancement of medical image segmentation techniques has been propelled by the adoption of deep learning techniques, particularly UNet-based approaches, which exploit semantic information to improve the accuracy of segmentations. However, the order of organs in scanned images has been disregarded by current medical image segmentation approaches based on UNet. Furthermore, the inherent network&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11305v2-abstract-full').style.display = 'inline'; document.getElementById('2411.11305v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11305v2-abstract-full" style="display: none;"> The advancement of medical image segmentation techniques has been propelled by the adoption of deep learning techniques, particularly UNet-based approaches, which exploit semantic information to improve the accuracy of segmentations. However, the order of organs in scanned images has been disregarded by current medical image segmentation approaches based on UNet. Furthermore, the inherent network structure of UNet does not provide direct capabilities for integrating temporal information. To efficiently integrate temporal information, we propose TP-UNet that utilizes temporal prompts, encompassing organ-construction relationships, to guide the segmentation UNet model. Specifically, our framework is featured with cross-attention and semantic alignment based on unsupervised contrastive learning to combine temporal prompts and image features effectively. Extensive evaluations on two medical image segmentation datasets demonstrate the state-of-the-art performance of TP-UNet. Our implementation will be open-sourced after acceptance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11305v2-abstract-full').style.display = 'none'; document.getElementById('2411.11305v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11053">arXiv:2411.11053</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11053">pdf</a>, <a href="https://arxiv.org/format/2411.11053">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> SRA-MCTS: Self-driven Reasoning Augmentation with Monte Carlo Tree Search for Code Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bin Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Y">Yiguan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yinghao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+Y">Yang Gao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11053v4-abstract-short" style="display: inline;"> Large language models demonstrate exceptional performance in simple code generation tasks but still face challenges in tackling complex problems. These challenges may stem from insufficient reasoning and problem decomposition capabilities. To address this issue, we propose a reasoning-augmented data generation process, SRA-MCTS, which guides the model to autonomously generate high-quality intermed&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11053v4-abstract-full').style.display = 'inline'; document.getElementById('2411.11053v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11053v4-abstract-full" style="display: none;"> Large language models demonstrate exceptional performance in simple code generation tasks but still face challenges in tackling complex problems. These challenges may stem from insufficient reasoning and problem decomposition capabilities. To address this issue, we propose a reasoning-augmented data generation process, SRA-MCTS, which guides the model to autonomously generate high-quality intermediate reasoning paths. This creates a positive feedback loop, enabling continuous improvement. Our method operates entirely through the model itself without requiring additional supervision. By synthesizing natural language reasoning paths and translating them into executable code, the approach ensures analytical accuracy and enhances the success rate in solving complex tasks. Experimental results show that, even without additional supervisory signals, our method achieves performance improvements across different model scales, demonstrating the significant potential of self-improvement in small models. Furthermore, the method remains robust when traditional Chain-of-Thought (CoT) approaches exhibit performance degradation, with notable improvements observed in diversity metrics such as pass@10. We encourage further exploration of reasoning processes within training data to enhance the ability of language models to address complex problems. Our code and data are public at https://github.com/DIRECT-BIT/SRA-MCTS. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11053v4-abstract-full').style.display = 'none'; document.getElementById('2411.11053v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10741">arXiv:2411.10741</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10741">pdf</a>, <a href="https://arxiv.org/format/2411.10741">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> MetaLA: Unified Optimal Linear Approximation to Softmax Attention Map </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chou%2C+Y">Yuhong Chou</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+M">Man Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+K">Kexin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+Y">Yuqi Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+R">Ruijie Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhong%2C+Y">Yiran Zhong</a>, <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+Y">Yu Qiao</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+J">Jibin Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+G">Guoqi Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10741v1-abstract-short" style="display: inline;"> Various linear complexity models, such as Linear Transformer (LinFormer), State Space Model (SSM), and Linear RNN (LinRNN), have been proposed to replace the conventional softmax attention in Transformer structures. However, the optimal design of these linear models is still an open question. In this work, we attempt to answer this question by finding the best linear approximation to softmax atten&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10741v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10741v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10741v1-abstract-full" style="display: none;"> Various linear complexity models, such as Linear Transformer (LinFormer), State Space Model (SSM), and Linear RNN (LinRNN), have been proposed to replace the conventional softmax attention in Transformer structures. However, the optimal design of these linear models is still an open question. In this work, we attempt to answer this question by finding the best linear approximation to softmax attention from a theoretical perspective. We start by unifying existing linear complexity models as the linear attention form and then identify three conditions for the optimal linear attention design: 1) Dynamic memory ability; 2) Static approximation ability; 3) Least parameter approximation. We find that none of the current linear models meet all three conditions, resulting in suboptimal performance. Instead, we propose Meta Linear Attention (MetaLA) as a solution that satisfies these conditions. Our experiments on Multi-Query Associative Recall (MQAR) task, language modeling, image classification, and Long-Range Arena (LRA) benchmark demonstrate that MetaLA is more effective than the existing linear models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10741v1-abstract-full').style.display = 'none'; document.getElementById('2411.10741v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09906">arXiv:2411.09906</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09906">pdf</a>, <a href="https://arxiv.org/format/2411.09906">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> A Survey of Machine Learning-based Physical-Layer Authentication in Wireless Communications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Meng%2C+R">Rui Meng</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bingxuan Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+X">Xiaodong Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+M">Mengying Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Wanga%2C+B">Bizhu Wanga</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+S">Shujun Han</a>, <a href="/search/cs?searchtype=author&amp;query=Lv%2C+S">Suyu Lv</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+P">Ping Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09906v1-abstract-short" style="display: inline;"> To ensure secure and reliable communication in wireless systems, authenticating the identities of numerous nodes is imperative. Traditional cryptography-based authentication methods suffer from issues such as low compatibility, reliability, and high complexity. Physical-Layer Authentication (PLA) is emerging as a promising complement due to its exploitation of unique properties in wireless environ&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09906v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09906v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09906v1-abstract-full" style="display: none;"> To ensure secure and reliable communication in wireless systems, authenticating the identities of numerous nodes is imperative. Traditional cryptography-based authentication methods suffer from issues such as low compatibility, reliability, and high complexity. Physical-Layer Authentication (PLA) is emerging as a promising complement due to its exploitation of unique properties in wireless environments. Recently, Machine Learning (ML)-based PLA has gained attention for its intelligence, adaptability, universality, and scalability compared to non-ML approaches. However, a comprehensive overview of state-of-the-art ML-based PLA and its foundational aspects is lacking. This paper presents a comprehensive survey of characteristics and technologies that can be used in the ML-based PLA. We categorize existing ML-based PLA schemes into two main types: multi-device identification and attack detection schemes. In deep learning-based multi-device identification schemes, Deep Neural Networks are employed to train models, avoiding complex processing and expert feature transformation. Deep learning-based multi-device identification schemes are further subdivided, with schemes based on Convolutional Neural Networks being extensively researched. In ML-based attack detection schemes, receivers utilize intelligent ML techniques to set detection thresholds automatically, eliminating the need for manual calculation or knowledge of channel models. ML-based attack detection schemes are categorized into three sub-types: Supervised Learning, Unsupervised Learning, and Reinforcement Learning. Additionally, we summarize open-source datasets used for PLA, encompassing Radio Frequency fingerprints and channel fingerprints. Finally, this paper outlines future research directions to guide researchers in related fields. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09906v1-abstract-full').style.display = 'none'; document.getElementById('2411.09906v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">111 pages, 9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07397">arXiv:2411.07397</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07397">pdf</a>, <a href="https://arxiv.org/format/2411.07397">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Spiking Transformer Hardware Accelerators in 3D Integration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Boxun Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Hwang%2C+J">Junyoung Hwang</a>, <a href="/search/cs?searchtype=author&amp;query=Vanna-iampikul%2C+P">Pruek Vanna-iampikul</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+K">Sung Kyu Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+P">Peng Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07397v1-abstract-short" style="display: inline;"> Spiking neural networks (SNNs) are powerful models of spatiotemporal computation and are well suited for deployment on resource-constrained edge devices and neuromorphic hardware due to their low power consumption. Leveraging attention mechanisms similar to those found in their artificial neural network counterparts, recently emerged spiking transformers have showcased promising performance and ef&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07397v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07397v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07397v1-abstract-full" style="display: none;"> Spiking neural networks (SNNs) are powerful models of spatiotemporal computation and are well suited for deployment on resource-constrained edge devices and neuromorphic hardware due to their low power consumption. Leveraging attention mechanisms similar to those found in their artificial neural network counterparts, recently emerged spiking transformers have showcased promising performance and efficiency by capitalizing on the binary nature of spiking operations. Recognizing the current lack of dedicated hardware support for spiking transformers, this paper presents the first work on 3D spiking transformer hardware architecture and design methodology. We present an architecture and physical design co-optimization approach tailored specifically for spiking transformers. Through memory-on-logic and logic-on-logic stacking enabled by 3D integration, we demonstrate significant energy and delay improvements compared to conventional 2D CMOS integration. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07397v1-abstract-full').style.display = 'none'; document.getElementById('2411.07397v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05214">arXiv:2411.05214</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05214">pdf</a>, <a href="https://arxiv.org/format/2411.05214">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> STAND-Guard: A Small Task-Adaptive Content Moderation Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+M">Minjia Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+P">Pingping Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Cai%2C+S">Siqi Cai</a>, <a href="/search/cs?searchtype=author&amp;query=An%2C+S">Shengnan An</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+S">Shengjie Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Z">Zeqi Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+C">Congrui Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bixiong Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05214v1-abstract-short" style="display: inline;"> Content moderation, the process of reviewing and monitoring the safety of generated content, is important for development of welcoming online platforms and responsible large language models. Content moderation contains various tasks, each with its unique requirements tailored to specific scenarios. Therefore, it is crucial to develop a model that can be easily adapted to novel or customized conten&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05214v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05214v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05214v1-abstract-full" style="display: none;"> Content moderation, the process of reviewing and monitoring the safety of generated content, is important for development of welcoming online platforms and responsible large language models. Content moderation contains various tasks, each with its unique requirements tailored to specific scenarios. Therefore, it is crucial to develop a model that can be easily adapted to novel or customized content moderation tasks accurately without extensive model tuning. This paper presents STAND-GUARD, a Small Task-Adaptive coNtent moDeration model. The basic motivation is: by performing instruct tuning on various content moderation tasks, we can unleash the power of small language models (SLMs) on unseen (out-of-distribution) content moderation tasks. We also carefully study the effects of training tasks and model size on the efficacy of cross-task fine-tuning mechanism. Experiments demonstrate STAND-Guard is comparable to GPT-3.5-Turbo across over 40 public datasets, as well as proprietary datasets derived from real-world business scenarios. Remarkably, STAND-Guard achieved nearly equivalent results to GPT-4-Turbo on unseen English binary classification tasks <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05214v1-abstract-full').style.display = 'none'; document.getElementById('2411.05214v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 1 figure</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02019">arXiv:2411.02019</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02019">pdf</a>, <a href="https://arxiv.org/format/2411.02019">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> Modulating State Space Model with SlowFast Framework for Compute-Efficient Ultra Low-Latency Speech Enhancement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+L">Longbiao Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+A">Ashutosh Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Buye Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Delbruck%2C+T">Tobi Delbruck</a>, <a href="/search/cs?searchtype=author&amp;query=Ithapu%2C+V+K">Vamsi Krishna Ithapu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shih-Chii Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02019v1-abstract-short" style="display: inline;"> Deep learning-based speech enhancement (SE) methods often face significant computational challenges when needing to meet low-latency requirements because of the increased number of frames to be processed. This paper introduces the SlowFast framework which aims to reduce computation costs specifically when low-latency enhancement is needed. The framework consists of a slow branch that analyzes the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02019v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02019v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02019v1-abstract-full" style="display: none;"> Deep learning-based speech enhancement (SE) methods often face significant computational challenges when needing to meet low-latency requirements because of the increased number of frames to be processed. This paper introduces the SlowFast framework which aims to reduce computation costs specifically when low-latency enhancement is needed. The framework consists of a slow branch that analyzes the acoustic environment at a low frame rate, and a fast branch that performs SE in the time domain at the needed higher frame rate to match the required latency. Specifically, the fast branch employs a state space model where its state transition process is dynamically modulated by the slow branch. Experiments on a SE task with a 2 ms algorithmic latency requirement using the Voice Bank + Demand dataset show that our approach reduces computation cost by 70% compared to a baseline single-branch network with equivalent parameters, without compromising enhancement performance. Furthermore, by leveraging the SlowFast framework, we implemented a network that achieves an algorithmic latency of just 60 渭s (one sample point at 16 kHz sample rate) with a computation cost of 100 M MACs/s, while scoring a PESQ-NB of 3.12 and SISNR of 16.62. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02019v1-abstract-full').style.display = 'none'; document.getElementById('2411.02019v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to ICASSP 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.01245">arXiv:2411.01245</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.01245">pdf</a>, <a href="https://arxiv.org/format/2411.01245">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> PMoL: Parameter Efficient MoE for Preference Mixing of LLM Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+D">Dongxu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yinzhuo Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bufan Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+W">Wenpeng Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+M">Muyun Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+T">Tiejun Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.01245v1-abstract-short" style="display: inline;"> Reinforcement Learning from Human Feedback (RLHF) has been proven to be an effective method for preference alignment of large language models (LLMs) and is widely used in the post-training process of LLMs. However, RLHF struggles with handling multiple competing preferences. This leads to a decrease in the alignment of LLMs with human preferences. To address this issue, we propose Preference Mixtu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01245v1-abstract-full').style.display = 'inline'; document.getElementById('2411.01245v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.01245v1-abstract-full" style="display: none;"> Reinforcement Learning from Human Feedback (RLHF) has been proven to be an effective method for preference alignment of large language models (LLMs) and is widely used in the post-training process of LLMs. However, RLHF struggles with handling multiple competing preferences. This leads to a decrease in the alignment of LLMs with human preferences. To address this issue, we propose Preference Mixture of LoRAs (PMoL) from the perspective of model architecture, which can adapt to any number of preferences to mix. PMoL combines Mixture of Experts (MoE) and Low Rank Adaptor (LoRA). This architecture is innovatively applied to the research of preference alignment and has achieved significant performance improvement. The expert group soft loss is used to enable MoE with the ability to mix preferences. Through comprehensive evaluation by the reward model and GPT-4o, the experiment results show that PMoL has superior preference mixing capabilities compared to baseline methods. PMoL achieves better preference alignment with lower training costs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01245v1-abstract-full').style.display = 'none'; document.getElementById('2411.01245v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.00332">arXiv:2411.00332</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.00332">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Mesoscale and Nanoscale Physics">cond-mat.mes-hall</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> In-situ Self-optimization of Quantum Dot Emission for Lasers by Machine-Learning Assisted Epitaxy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shen%2C+C">Chao Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhan%2C+W">Wenkang Zhan</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+S">Shujie Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Hao%2C+H">Hongyue Hao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhuo%2C+N">Ning Zhuo</a>, <a href="/search/cs?searchtype=author&amp;query=Xin%2C+K">Kaiyao Xin</a>, <a href="/search/cs?searchtype=author&amp;query=Cong%2C+H">Hui Cong</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+C">Chi Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Ng%2C+T+K">Tien Khee Ng</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+S">Siming Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+C">Chunlai Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+F">Fengqi Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhanguo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chao Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.00332v1-abstract-short" style="display: inline;"> Traditional methods for optimizing light source emissions rely on a time-consuming trial-and-error approach. While in-situ optimization of light source gain media emission during growth is ideal, it has yet to be realized. In this work, we integrate in-situ reflection high-energy electron diffraction (RHEED) with machine learning (ML) to correlate the surface reconstruction with the photoluminesce&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00332v1-abstract-full').style.display = 'inline'; document.getElementById('2411.00332v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.00332v1-abstract-full" style="display: none;"> Traditional methods for optimizing light source emissions rely on a time-consuming trial-and-error approach. While in-situ optimization of light source gain media emission during growth is ideal, it has yet to be realized. In this work, we integrate in-situ reflection high-energy electron diffraction (RHEED) with machine learning (ML) to correlate the surface reconstruction with the photoluminescence (PL) of InAs/GaAs quantum dots (QDs), which serve as the active region of lasers. A lightweight ResNet-GLAM model is employed for the real-time processing of RHEED data as input, enabling effective identification of optical performance. This approach guides the dynamic optimization of growth parameters, allowing real-time feedback control to adjust the QDs emission for lasers. We successfully optimized InAs QDs on GaAs substrates, with a 3.2-fold increase in PL intensity and a reduction in full width at half maximum (FWHM) from 36.69 meV to 28.17 meV under initially suboptimal growth conditions. Our automated, in-situ self-optimized lasers with 5-layer InAs QDs achieved electrically pumped continuous-wave operation at 1240 nm with a low threshold current of 150 A/cm2 at room temperature, an excellent performance comparable to samples grown through traditional manual multi-parameter optimization methods. These results mark a significant step toward intelligent, low-cost, and reproductive light emitters production. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00332v1-abstract-full').style.display = 'none'; document.getElementById('2411.00332v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.24175">arXiv:2410.24175</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.24175">pdf</a>, <a href="https://arxiv.org/format/2410.24175">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Constraint Back-translation Improves Complex Instruction Following of Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Qi%2C+Y">Yunjia Qi</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+H">Hao Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xiaozhi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bin Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+L">Lei Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+J">Juanzi Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.24175v1-abstract-short" style="display: inline;"> Large language models (LLMs) struggle to follow instructions with complex constraints in format, length, etc. Following the conventional instruction-tuning practice, previous works conduct post-training on complex instruction-response pairs generated by feeding complex instructions to advanced LLMs. However, even advanced LLMs cannot follow complex instructions well, thus limiting the quality of g&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.24175v1-abstract-full').style.display = 'inline'; document.getElementById('2410.24175v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.24175v1-abstract-full" style="display: none;"> Large language models (LLMs) struggle to follow instructions with complex constraints in format, length, etc. Following the conventional instruction-tuning practice, previous works conduct post-training on complex instruction-response pairs generated by feeding complex instructions to advanced LLMs. However, even advanced LLMs cannot follow complex instructions well, thus limiting the quality of generated data. In this work, we find that existing datasets inherently contain implicit complex constraints and propose a novel data generation technique, constraint back-translation. Specifically, we take the high-quality instruction-response pairs in existing datasets and only adopt advanced LLMs to add complex constraints already met by the responses to the instructions, which naturally reduces costs and data noise. In the experiments, we adopt Llama3-70B-Instruct to back-translate constraints and create a high-quality complex instruction-response dataset, named CRAB. We present that post-training on CRAB improves multiple backbone LLMs&#39; complex instruction-following ability, evaluated on extensive instruction-following benchmarks. We further find that constraint back-translation also serves as a useful auxiliary training objective in post-training. Our code, data, and models will be released to facilitate future research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.24175v1-abstract-full').style.display = 'none'; document.getElementById('2410.24175v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.22144">arXiv:2410.22144</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.22144">pdf</a>, <a href="https://arxiv.org/ps/2410.22144">ps</a>, <a href="https://arxiv.org/format/2410.22144">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Theoretical Economics">econ.TH</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> </div> </div> <p class="title is-5 mathjax"> The equilibrium properties of obvious strategy profiles in games with many players </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+E+C+B+W+H">Enxian Chen Bin Wu Hanping Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.22144v1-abstract-short" style="display: inline;"> This paper studies the equilibrium properties of the ``obvious strategy profile&#39;&#39; in large finite-player games. Each player in such a strategy profile simply adopts a randomized strategy as she would have used in a symmetric equilibrium of an idealized large game. We show that, under a continuity assumption, (i) obvious strategy profiles constitute a convergent sequence of approximate symmetric eq&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22144v1-abstract-full').style.display = 'inline'; document.getElementById('2410.22144v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.22144v1-abstract-full" style="display: none;"> This paper studies the equilibrium properties of the ``obvious strategy profile&#39;&#39; in large finite-player games. Each player in such a strategy profile simply adopts a randomized strategy as she would have used in a symmetric equilibrium of an idealized large game. We show that, under a continuity assumption, (i) obvious strategy profiles constitute a convergent sequence of approximate symmetric equilibria as the number of players tends to infinity, and (ii) realizations of such strategy profiles also form a convergent sequence of (pure strategy) approximate equilibria with probability approaching one. Our findings offer a solution that is easily implemented without coordination issues and is asymptotically optimal for players in large finite games. Additionally, we present a convergence result for approximate symmetric equilibria. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22144v1-abstract-full').style.display = 'none'; document.getElementById('2410.22144v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20675">arXiv:2410.20675</a> <span>&nbsp;&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Impact of Translation and Viewpoint Transition Methods in VR on Spatial Learning and Cybersickness </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mostafavi%2C+A">Armin Mostafavi</a>, <a href="/search/cs?searchtype=author&amp;query=Qiu%2C+Z">Zhiwen Qiu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+T+B">Tong Bill Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Kalantari%2C+S">Saleh Kalantari</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20675v2-abstract-short" style="display: inline;"> Virtual locomotion technique (VLT) is a fundamental component of virtual reality (VR) systems that translates physical and controller inputs into virtual translational movements and viewpoint transitions. Poorly designed VLTs can result in discomfort, nausea, and reductions in task performance. Understanding the effectiveness of VLTs across various levels of interaction fidelity is crucial to enha&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20675v2-abstract-full').style.display = 'inline'; document.getElementById('2410.20675v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20675v2-abstract-full" style="display: none;"> Virtual locomotion technique (VLT) is a fundamental component of virtual reality (VR) systems that translates physical and controller inputs into virtual translational movements and viewpoint transitions. Poorly designed VLTs can result in discomfort, nausea, and reductions in task performance. Understanding the effectiveness of VLTs across various levels of interaction fidelity is crucial to enhance user experience and spatial awareness. The current study addressed a significant gap in VR design research and practice, as few previous efforts have been made to comprehensively evaluate the effectiveness of controller-based VLTs in virtual indoor environments. We conducted a user study in which participants navigated through two complex virtual environments, one focusing on exploratory tasks and the other on goal-oriented navigation. The findings offer insights into the trade-offs among spatial knowledge acquisition, wayfinding performance, cybersickness, and sense of presence, and have design implications for future VR interfaces. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20675v2-abstract-full').style.display = 'none'; document.getElementById('2410.20675v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The work needs revision and will be updated later</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.19743">arXiv:2410.19743</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.19743">pdf</a>, <a href="https://arxiv.org/format/2410.19743">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> AppBench: Planning of Multiple APIs from Various APPs for Complex User Instruction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongru Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Rui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+B">Boyang Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+H">Heming Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+J">Jingtao Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zeming Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+J+Z">Jeff Z. Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Wong%2C+K">Kam-Fai Wong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.19743v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) can interact with the real world by connecting with versatile external APIs, resulting in better problem-solving and task automation capabilities. Previous research primarily focuses on APIs with limited arguments from a single source or overlooks the complex dependency relationship between different APIs. However, it is essential to utilize multiple APIs collaborative&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.19743v1-abstract-full').style.display = 'inline'; document.getElementById('2410.19743v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.19743v1-abstract-full" style="display: none;"> Large Language Models (LLMs) can interact with the real world by connecting with versatile external APIs, resulting in better problem-solving and task automation capabilities. Previous research primarily focuses on APIs with limited arguments from a single source or overlooks the complex dependency relationship between different APIs. However, it is essential to utilize multiple APIs collaboratively from various sources (e.g., different Apps in the iPhone), especially for complex user instructions. In this paper, we introduce \texttt{AppBench}, the first benchmark to evaluate LLMs&#39; ability to plan and execute multiple APIs from various sources in order to complete the user&#39;s task. Specifically, we consider two significant challenges in multiple APIs: \textit{1) graph structures:} some APIs can be executed independently while others need to be executed one by one, resulting in graph-like execution order; and \textit{2) permission constraints:} which source is authorized to execute the API call. We have experimental results on 9 distinct LLMs; e.g., GPT-4o achieves only a 2.0\% success rate at the most complex instruction, revealing that the existing state-of-the-art LLMs still cannot perform well in this situation even with the help of in-context learning and finetuning. Our code and data are publicly available at https://github.com/ruleGreen/AppBench. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.19743v1-abstract-full').style.display = 'none'; document.getElementById('2410.19743v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.18495">arXiv:2410.18495</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.18495">pdf</a>, <a href="https://arxiv.org/format/2410.18495">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Multi-UAV Behavior-based Formation with Static and Dynamic Obstacles Avoidance via Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Y">Yuqing Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+C">Chao Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zang%2C+H">Hongzhi Zang</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+F">Feng Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+W">Wenhao Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+J">Jingyi Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jiayu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Botian Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yi Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yu Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.18495v1-abstract-short" style="display: inline;"> Formation control of multiple Unmanned Aerial Vehicles (UAVs) is vital for practical applications. This paper tackles the task of behavior-based UAV formation while avoiding static and dynamic obstacles during directed flight. We present a two-stage reinforcement learning (RL) training pipeline to tackle the challenge of multi-objective optimization, large exploration spaces, and the sim-to-real g&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18495v1-abstract-full').style.display = 'inline'; document.getElementById('2410.18495v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.18495v1-abstract-full" style="display: none;"> Formation control of multiple Unmanned Aerial Vehicles (UAVs) is vital for practical applications. This paper tackles the task of behavior-based UAV formation while avoiding static and dynamic obstacles during directed flight. We present a two-stage reinforcement learning (RL) training pipeline to tackle the challenge of multi-objective optimization, large exploration spaces, and the sim-to-real gap. The first stage searches in a simplified scenario for a linear utility function that balances all task objectives simultaneously, whereas the second stage applies the utility function in complex scenarios, utilizing curriculum learning to navigate large exploration spaces. Additionally, we apply an attention-based observation encoder to enhance formation maintenance and manage varying obstacle quantity. Experiments in simulation and real world demonstrate that our method outperforms planning-based and RL-based baselines regarding collision-free rate and formation maintenance in scenarios with static, dynamic, and mixed obstacles. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18495v1-abstract-full').style.display = 'none'; document.getElementById('2410.18495v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15631">arXiv:2410.15631</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.15631">pdf</a>, <a href="https://arxiv.org/format/2410.15631">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Security of Language Models for Code: A Systematic Literature Review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yuchen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+W">Weisong Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+C">Chunrong Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhenpeng Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Ge%2C+Y">Yifei Ge</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+T">Tingxu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Quanjun Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhenyu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Baowen Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15631v1-abstract-short" style="display: inline;"> Language models for code (CodeLMs) have emerged as powerful tools for code-related tasks, outperforming traditional methods and standard machine learning approaches. However, these models are susceptible to security vulnerabilities, drawing increasing research attention from domains such as software engineering, artificial intelligence, and cybersecurity. Despite the growing body of research focus&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15631v1-abstract-full').style.display = 'inline'; document.getElementById('2410.15631v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15631v1-abstract-full" style="display: none;"> Language models for code (CodeLMs) have emerged as powerful tools for code-related tasks, outperforming traditional methods and standard machine learning approaches. However, these models are susceptible to security vulnerabilities, drawing increasing research attention from domains such as software engineering, artificial intelligence, and cybersecurity. Despite the growing body of research focused on the security of CodeLMs, a comprehensive survey in this area remains absent. To address this gap, we systematically review 67 relevant papers, organizing them based on attack and defense strategies. Furthermore, we provide an overview of commonly used language models, datasets, and evaluation metrics, and highlight open-source tools and promising directions for future research in securing CodeLMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15631v1-abstract-full').style.display = 'none'; document.getElementById('2410.15631v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.14276">arXiv:2410.14276</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.14276">pdf</a>, <a href="https://arxiv.org/format/2410.14276">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> EcomEdit: An Automated E-commerce Knowledge Editing Framework for Enhanced Product and Purchase Intention Understanding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lau%2C+C+M+S">Ching Ming Samuel Lau</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Weiqi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+H">Haochen Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Baixuan Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+J">Jiaxin Bai</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Y">Yangqiu Song</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.14276v1-abstract-short" style="display: inline;"> Knowledge Editing (KE) aims to correct and update factual information in Large Language Models (LLMs) to ensure accuracy and relevance without computationally expensive fine-tuning. Though it has been proven effective in several domains, limited work has focused on its application within the e-commerce sector. However, there are naturally occurring scenarios that make KE necessary in this domain,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14276v1-abstract-full').style.display = 'inline'; document.getElementById('2410.14276v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.14276v1-abstract-full" style="display: none;"> Knowledge Editing (KE) aims to correct and update factual information in Large Language Models (LLMs) to ensure accuracy and relevance without computationally expensive fine-tuning. Though it has been proven effective in several domains, limited work has focused on its application within the e-commerce sector. However, there are naturally occurring scenarios that make KE necessary in this domain, such as the timely updating of product features and trending purchase intentions by customers, which necessitate further exploration. In this paper, we pioneer the application of KE in the e-commerce domain by presenting ECOMEDIT, an automated e-commerce knowledge editing framework tailored for e-commerce-related knowledge and tasks. Our framework leverages more powerful LLMs as judges to enable automatic knowledge conflict detection and incorporates conceptualization to enhance the semantic coverage of the knowledge to be edited. Through extensive experiments, we demonstrate the effectiveness of ECOMEDIT in improving LLMs&#39; understanding of product descriptions and purchase intentions. We also show that LLMs, after our editing, can achieve stronger performance on downstream e-commerce tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14276v1-abstract-full').style.display = 'none'; document.getElementById('2410.14276v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.12478">arXiv:2410.12478</a> <span>&nbsp;&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> MlingConf: A Comprehensive Study of Multilingual Confidence Estimation on Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xue%2C+B">Boyang Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongru Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Rui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Sheng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zezhong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+Y">Yiming Du</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+B">Bin Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Wong%2C+K">Kam-Fai Wong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.12478v2-abstract-short" style="display: inline;"> The tendency of Large Language Models (LLMs) to generate hallucinations raises concerns regarding their reliability. Therefore, confidence estimations indicating the extent of trustworthiness of the generations become essential. However, current LLM confidence estimations in languages other than English remain underexplored. This paper addresses this gap by introducing a comprehensive investigatio&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12478v2-abstract-full').style.display = 'inline'; document.getElementById('2410.12478v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.12478v2-abstract-full" style="display: none;"> The tendency of Large Language Models (LLMs) to generate hallucinations raises concerns regarding their reliability. Therefore, confidence estimations indicating the extent of trustworthiness of the generations become essential. However, current LLM confidence estimations in languages other than English remain underexplored. This paper addresses this gap by introducing a comprehensive investigation of Multilingual Confidence estimation (MlingConf) on LLMs, focusing on both language-agnostic (LA) and language-specific (LS) tasks to explore the performance and language dominance effects of multilingual confidence estimations on different tasks. The benchmark comprises four meticulously checked and human-evaluate high-quality multilingual datasets for LA tasks and one for the LS task tailored to specific social, cultural, and geographical contexts of a language. Our experiments reveal that on LA tasks English exhibits notable linguistic dominance in confidence estimations than other languages, while on LS tasks, using question-related language to prompt LLMs demonstrates better linguistic dominance in multilingual confidence estimations. The phenomena inspire a simple yet effective native-tone prompting strategy by employing language-specific prompts for LS tasks, effectively improving LLMs&#39; reliability and accuracy on LS tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12478v2-abstract-full').style.display = 'none'; document.getElementById('2410.12478v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Comments: This work was intended as a replacement of arXiv:2402.13606 and any subsequent updates will appear there</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.11458">arXiv:2410.11458</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.11458">pdf</a>, <a href="https://arxiv.org/format/2410.11458">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Engineering, Finance, and Science">cs.CE</span> </div> </div> <p class="title is-5 mathjax"> PANACEA: Towards Influence-driven Profiling of Drug Target Combinations in Cancer Signaling Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Baihui Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Bhowmick%2C+S+S">Sourav S Bhowmick</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+J">Jiancheng Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.11458v1-abstract-short" style="display: inline;"> Data profiling has garnered increasing attention within the data science community, primarily focusing on structured data. In this paper, we introduce a novel framework called panacea, designed to profile known cancer target combinations in cancer type-specific signaling networks. Given a large signaling network for a cancer type, known targets from approved anticancer drugs, a set of cancer mutat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11458v1-abstract-full').style.display = 'inline'; document.getElementById('2410.11458v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.11458v1-abstract-full" style="display: none;"> Data profiling has garnered increasing attention within the data science community, primarily focusing on structured data. In this paper, we introduce a novel framework called panacea, designed to profile known cancer target combinations in cancer type-specific signaling networks. Given a large signaling network for a cancer type, known targets from approved anticancer drugs, a set of cancer mutated genes, and a combination size parameter k, panacea automatically generates a delta histogram that depicts the distribution of k-sized target combinations based on their topological influence on cancer mutated genes and other nodes. To this end, we formally define the novel problem of influence-driven target combination profiling (i-TCP) and propose an algorithm that employs two innovative personalized PageRank-based measures, PEN distance and PEN-diff, to quantify this influence and generate the delta histogram. Our experimental studies on signaling networks related to four cancer types demonstrate that our proposed measures outperform several popular network properties in profiling known target combinations. Notably, we demonstrate that panacea can significantly reduce the candidate k-node combination exploration space, addressing a longstanding challenge for tasks such as in silico target combination prediction in large cancer-specific signaling networks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11458v1-abstract-full').style.display = 'none'; document.getElementById('2410.11458v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 13 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.10594">arXiv:2410.10594</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.10594">pdf</a>, <a href="https://arxiv.org/format/2410.10594">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> VisRAG: Vision-based Retrieval-augmented Generation on Multi-modality Documents </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yu%2C+S">Shi Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+C">Chaoyue Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bokai Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+J">Junbo Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Ran%2C+J">Junhao Ran</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+Y">Yukun Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhenghao Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shuo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+X">Xu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhiyuan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+M">Maosong Sun</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.10594v1-abstract-short" style="display: inline;"> Retrieval-augmented generation (RAG) is an effective technique that enables large language models (LLMs) to utilize external knowledge sources for generation. However, current RAG systems are solely based on text, rendering it impossible to utilize vision information like layout and images that play crucial roles in real-world multi-modality documents. In this paper, we introduce VisRAG, which tac&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10594v1-abstract-full').style.display = 'inline'; document.getElementById('2410.10594v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.10594v1-abstract-full" style="display: none;"> Retrieval-augmented generation (RAG) is an effective technique that enables large language models (LLMs) to utilize external knowledge sources for generation. However, current RAG systems are solely based on text, rendering it impossible to utilize vision information like layout and images that play crucial roles in real-world multi-modality documents. In this paper, we introduce VisRAG, which tackles this issue by establishing a vision-language model (VLM)-based RAG pipeline. In this pipeline, instead of first parsing the document to obtain text, the document is directly embedded using a VLM as an image and then retrieved to enhance the generation of a VLM. Compared to traditional text-based RAG, VisRAG maximizes the retention and utilization of the data information in the original documents, eliminating the information loss introduced during the parsing process. We collect both open-source and synthetic data to train the retriever in VisRAG and explore a variety of generation methods. Experiments demonstrate that VisRAG outperforms traditional RAG in both the retrieval and generation stages, achieving a 25--39\% end-to-end performance gain over traditional text-based RAG pipeline. Further analysis reveals that VisRAG is effective in utilizing training data and demonstrates strong generalization capability, positioning it as a promising solution for RAG on multi-modality documents. Our code and data are available at https://github.com/openbmb/visrag . <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10594v1-abstract-full').style.display = 'none'; document.getElementById('2410.10594v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.10454">arXiv:2410.10454</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.10454">pdf</a>, <a href="https://arxiv.org/format/2410.10454">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Improve Meta-learning for Few-Shot Text Classification with All You Can Acquire from the Tasks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xinyue Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+Y">Yunlong Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Zong%2C+L">Linlin Zong</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.10454v1-abstract-short" style="display: inline;"> Meta-learning has emerged as a prominent technology for few-shot text classification and has achieved promising performance. However, existing methods often encounter difficulties in drawing accurate class prototypes from support set samples, primarily due to probable large intra-class differences and small inter-class differences within the task. Recent approaches attempt to incorporate external&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10454v1-abstract-full').style.display = 'inline'; document.getElementById('2410.10454v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.10454v1-abstract-full" style="display: none;"> Meta-learning has emerged as a prominent technology for few-shot text classification and has achieved promising performance. However, existing methods often encounter difficulties in drawing accurate class prototypes from support set samples, primarily due to probable large intra-class differences and small inter-class differences within the task. Recent approaches attempt to incorporate external knowledge or pre-trained language models to augment data, but this requires additional resources and thus does not suit many few-shot scenarios. In this paper, we propose a novel solution to address this issue by adequately leveraging the information within the task itself. Specifically, we utilize label information to construct a task-adaptive metric space, thereby adaptively reducing the intra-class differences and magnifying the inter-class differences. We further employ the optimal transport technique to estimate class prototypes with query set samples together, mitigating the problem of inaccurate and ambiguous support set samples caused by large intra-class differences. We conduct extensive experiments on eight benchmark datasets, and our approach shows obvious advantages over state-of-the-art models across all the tasks on all the datasets. For reproducibility, all the datasets and codes are available at https://github.com/YvoGao/LAQDA. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10454v1-abstract-full').style.display = 'none'; document.getElementById('2410.10454v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by EMNLP 2024 Findings</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.09398">arXiv:2410.09398</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.09398">pdf</a>, <a href="https://arxiv.org/format/2410.09398">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MITA: Bridging the Gap between Model and Data for Test-time Adaptation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+Y">Yige Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bingbing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+T">Teng Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+L">Liang Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+F">Fei Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+H">Huawei Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+X">Xueqi Cheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.09398v1-abstract-short" style="display: inline;"> Test-Time Adaptation (TTA) has emerged as a promising paradigm for enhancing the generalizability of models. However, existing mainstream TTA methods, predominantly operating at batch level, often exhibit suboptimal performance in complex real-world scenarios, particularly when confronting outliers or mixed distributions. This phenomenon stems from a pronounced over-reliance on statistical pattern&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.09398v1-abstract-full').style.display = 'inline'; document.getElementById('2410.09398v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.09398v1-abstract-full" style="display: none;"> Test-Time Adaptation (TTA) has emerged as a promising paradigm for enhancing the generalizability of models. However, existing mainstream TTA methods, predominantly operating at batch level, often exhibit suboptimal performance in complex real-world scenarios, particularly when confronting outliers or mixed distributions. This phenomenon stems from a pronounced over-reliance on statistical patterns over the distinct characteristics of individual instances, resulting in a divergence between the distribution captured by the model and data characteristics. To address this challenge, we propose Meet-In-The-Middle based Test-Time Adaptation ($\textbf{MITA}$), which introduces energy-based optimization to encourage mutual adaptation of the model and data from opposing directions, thereby meeting in the middle. MITA pioneers a significant departure from traditional approaches that focus solely on aligning the model to the data, facilitating a more effective bridging of the gap between model&#39;s distribution and data characteristics. Comprehensive experiments with MITA across three distinct scenarios (Outlier, Mixture, and Pure) demonstrate its superior performance over SOTA methods, highlighting its potential to significantly enhance generalizability in practical applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.09398v1-abstract-full').style.display = 'none'; document.getElementById('2410.09398v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.08588">arXiv:2410.08588</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.08588">pdf</a>, <a href="https://arxiv.org/format/2410.08588">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> ViT3D Alignment of LLaMA3: 3D Medical Image Report Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+S">Siyou Li</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Beining Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+Y">Yihao Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Nie%2C+D">Dong Nie</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Le Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.08588v1-abstract-short" style="display: inline;"> Automatic medical report generation (MRG), which aims to produce detailed text reports from medical images, has emerged as a critical task in this domain. MRG systems can enhance radiological workflows by reducing the time and effort required for report writing, thereby improving diagnostic efficiency. In this work, we present a novel approach for automatic MRG utilizing a multimodal large languag&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.08588v1-abstract-full').style.display = 'inline'; document.getElementById('2410.08588v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.08588v1-abstract-full" style="display: none;"> Automatic medical report generation (MRG), which aims to produce detailed text reports from medical images, has emerged as a critical task in this domain. MRG systems can enhance radiological workflows by reducing the time and effort required for report writing, thereby improving diagnostic efficiency. In this work, we present a novel approach for automatic MRG utilizing a multimodal large language model. Specifically, we employed the 3D Vision Transformer (ViT3D) image encoder introduced from M3D-CLIP to process 3D scans and use the Asclepius-Llama3-8B as the language model to generate the text reports by auto-regressive decoding. The experiment shows our model achieved an average Green score of 0.3 on the MRG task validation set and an average accuracy of 0.61 on the visual question answering (VQA) task validation set, outperforming the baseline model. Our approach demonstrates the effectiveness of the ViT3D alignment of LLaMA3 for automatic MRG and VQA tasks by tuning the model on a small dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.08588v1-abstract-full').style.display = 'none'; document.getElementById('2410.08588v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.08172">arXiv:2410.08172</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.08172">pdf</a>, <a href="https://arxiv.org/format/2410.08172">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> On the Evaluation of Generative Robotic Simulations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+F">Feng Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Botian Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Hua%2C+P">Pu Hua</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+P">Peiqi Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yanchao Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Y">Yi Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+H">Huazhe Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.08172v1-abstract-short" style="display: inline;"> Due to the difficulty of acquiring extensive real-world data, robot simulation has become crucial for parallel training and sim-to-real transfer, highlighting the importance of scalable simulated robotic tasks. Foundation models have demonstrated impressive capacities in autonomously generating feasible robotic tasks. However, this new paradigm underscores the challenge of adequately evaluating th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.08172v1-abstract-full').style.display = 'inline'; document.getElementById('2410.08172v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.08172v1-abstract-full" style="display: none;"> Due to the difficulty of acquiring extensive real-world data, robot simulation has become crucial for parallel training and sim-to-real transfer, highlighting the importance of scalable simulated robotic tasks. Foundation models have demonstrated impressive capacities in autonomously generating feasible robotic tasks. However, this new paradigm underscores the challenge of adequately evaluating these autonomously generated tasks. To address this, we propose a comprehensive evaluation framework tailored to generative simulations. Our framework segments evaluation into three core aspects: quality, diversity, and generalization. For single-task quality, we evaluate the realism of the generated task and the completeness of the generated trajectories using large language models and vision-language models. In terms of diversity, we measure both task and data diversity through text similarity of task descriptions and world model loss trained on collected task trajectories. For task-level generalization, we assess the zero-shot generalization ability on unseen tasks of a policy trained with multiple generated tasks. Experiments conducted on three representative task generation pipelines demonstrate that the results from our framework are highly consistent with human evaluations, confirming the feasibility and validity of our approach. The findings reveal that while metrics of quality and diversity can be achieved through certain methods, no single approach excels across all metrics, suggesting a need for greater focus on balancing these different metrics. Additionally, our analysis further highlights the common challenge of low generalization capability faced by current works. Our anonymous website: https://sites.google.com/view/evaltasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.08172v1-abstract-full').style.display = 'none'; document.getElementById('2410.08172v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project website: https://sites.google.com/view/evaltasks</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.06244">arXiv:2410.06244</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.06244">pdf</a>, <a href="https://arxiv.org/format/2410.06244">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Story-Adapter: A Training-free Iterative Framework for Long Story Visualization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mao%2C+J">Jiawei Mao</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+X">Xiaoke Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Y">Yunfei Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Chang%2C+Y">Yuanqi Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Hui%2C+M">Mude Hui</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bingjie Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yuyin Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.06244v1-abstract-short" style="display: inline;"> Story visualization, the task of generating coherent images based on a narrative, has seen significant advancements with the emergence of text-to-image models, particularly diffusion models. However, maintaining semantic consistency, generating high-quality fine-grained interactions, and ensuring computational feasibility remain challenging, especially in long story visualization (i.e., up to 100&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06244v1-abstract-full').style.display = 'inline'; document.getElementById('2410.06244v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.06244v1-abstract-full" style="display: none;"> Story visualization, the task of generating coherent images based on a narrative, has seen significant advancements with the emergence of text-to-image models, particularly diffusion models. However, maintaining semantic consistency, generating high-quality fine-grained interactions, and ensuring computational feasibility remain challenging, especially in long story visualization (i.e., up to 100 frames). In this work, we propose a training-free and computationally efficient framework, termed Story-Adapter, to enhance the generative capability of long stories. Specifically, we propose an iterative paradigm to refine each generated image, leveraging both the text prompt and all generated images from the previous iteration. Central to our framework is a training-free global reference cross-attention module, which aggregates all generated images from the previous iteration to preserve semantic consistency across the entire story, while minimizing computational costs with global embeddings. This iterative process progressively optimizes image generation by repeatedly incorporating text constraints, resulting in more precise and fine-grained interactions. Extensive experiments validate the superiority of Story-Adapter in improving both semantic consistency and generative capability for fine-grained interactions, particularly in long story scenarios. The project page and associated code can be accessed via https://jwmao1.github.io/storyadapter . <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06244v1-abstract-full').style.display = 'none'; document.getElementById('2410.06244v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 16 figures, The project page and associated code can be accessed via https://jwmao1.github.io/storyadapter</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.05514">arXiv:2410.05514</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.05514">pdf</a>, <a href="https://arxiv.org/format/2410.05514">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Toward General Object-level Mapping from Sparse Views with 3D Diffusion Priors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liao%2C+Z">Ziwei Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Binbin Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Waslander%2C+S+L">Steven L. Waslander</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.05514v1-abstract-short" style="display: inline;"> Object-level mapping builds a 3D map of objects in a scene with detailed shapes and poses from multi-view sensor observations. Conventional methods struggle to build complete shapes and estimate accurate poses due to partial occlusions and sensor noise. They require dense observations to cover all objects, which is challenging to achieve in robotics trajectories. Recent work introduces generative&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.05514v1-abstract-full').style.display = 'inline'; document.getElementById('2410.05514v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.05514v1-abstract-full" style="display: none;"> Object-level mapping builds a 3D map of objects in a scene with detailed shapes and poses from multi-view sensor observations. Conventional methods struggle to build complete shapes and estimate accurate poses due to partial occlusions and sensor noise. They require dense observations to cover all objects, which is challenging to achieve in robotics trajectories. Recent work introduces generative shape priors for object-level mapping from sparse views, but is limited to single-category objects. In this work, we propose a General Object-level Mapping system, GOM, which leverages a 3D diffusion model as shape prior with multi-category support and outputs Neural Radiance Fields (NeRFs) for both texture and geometry for all objects in a scene. GOM includes an effective formulation to guide a pre-trained diffusion model with extra nonlinear constraints from sensor measurements without finetuning. We also develop a probabilistic optimization formulation to fuse multi-view sensor observations and diffusion priors for joint 3D object pose and shape estimation. Our GOM system demonstrates superior multi-category mapping performance from sparse views, and achieves more accurate mapping results compared to state-of-the-art methods on the real-world benchmarks. We will release our code: https://github.com/TRAILab/GeneralObjectMapping. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.05514v1-abstract-full').style.display = 'none'; document.getElementById('2410.05514v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by CoRL 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.04986">arXiv:2410.04986</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.04986">pdf</a>, <a href="https://arxiv.org/format/2410.04986">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> Finding Safety Violations of AI-Enabled Control Systems through the Lens of Synthesized Proxy Programs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shi%2C+J">Jieke Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zhou Yang</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+J">Junda He</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bowen Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+D">Dongsun Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+D">DongGyun Han</a>, <a href="/search/cs?searchtype=author&amp;query=Lo%2C+D">David Lo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.04986v1-abstract-short" style="display: inline;"> Given the increasing adoption of modern AI-enabled control systems, ensuring their safety and reliability has become a critical task in software testing. One prevalent approach to testing control systems is falsification, which aims to find an input signal that causes the control system to violate a formal safety specification using optimization algorithms. However, applying falsification to AI-en&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.04986v1-abstract-full').style.display = 'inline'; document.getElementById('2410.04986v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.04986v1-abstract-full" style="display: none;"> Given the increasing adoption of modern AI-enabled control systems, ensuring their safety and reliability has become a critical task in software testing. One prevalent approach to testing control systems is falsification, which aims to find an input signal that causes the control system to violate a formal safety specification using optimization algorithms. However, applying falsification to AI-enabled control systems poses two significant challenges: (1)~it requires the system to execute numerous candidate test inputs, which can be time-consuming, particularly for systems with AI models that have many parameters, and (2)~multiple safety requirements are typically defined as a conjunctive specification, which is difficult for existing falsification approaches to comprehensively cover. This paper introduces Synthify, a falsification framework tailored for AI-enabled control systems. Our approach performs falsification in a two-phase process. At the start, Synthify synthesizes a program that implements one or a few linear controllers to serve as a proxy for the AI controller. This proxy program mimics the AI controller&#39;s functionality but is computationally more efficient. Then, Synthify employs the $蔚$-greedy strategy to sample a promising sub-specification from the conjunctive safety specification. It then uses a Simulated Annealing-based falsification algorithm to find violations of the sampled sub-specification for the control system. To evaluate Synthify, we compare it to PSY-TaLiRo, a state-of-the-art and industrial-strength falsification tool, on 8 publicly available control systems. On average, Synthify achieves a 83.5% higher success rate in falsification compared to PSY-TaLiRo with the same budget of falsification trials. The safety violations found by Synthify are also more diverse than those found by PSY-TaLiRo, covering 137.7% more sub-specifications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.04986v1-abstract-full').style.display = 'none'; document.getElementById('2410.04986v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under Review by ACM Transactions on Software Engineering and Methodology (TOSEM)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.03533">arXiv:2410.03533</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.03533">pdf</a>, <a href="https://arxiv.org/format/2410.03533">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> </div> </div> <p class="title is-5 mathjax"> Multiscale fusion enhanced spiking neural network for invasive BCI neural signal decoding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Song%2C+Y">Yu Song</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+L">Liyuan Han</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+T">Tielin Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.03533v1-abstract-short" style="display: inline;"> Brain-computer interfaces (BCIs) are an advanced fusion of neuroscience and artificial intelligence, requiring stable and long-term decoding of neural signals. Spiking Neural Networks (SNNs), with their neuronal dynamics and spike-based signal processing, are inherently well-suited for this task. This paper presents a novel approach utilizing a Multiscale Fusion enhanced Spiking Neural Network (MF&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03533v1-abstract-full').style.display = 'inline'; document.getElementById('2410.03533v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.03533v1-abstract-full" style="display: none;"> Brain-computer interfaces (BCIs) are an advanced fusion of neuroscience and artificial intelligence, requiring stable and long-term decoding of neural signals. Spiking Neural Networks (SNNs), with their neuronal dynamics and spike-based signal processing, are inherently well-suited for this task. This paper presents a novel approach utilizing a Multiscale Fusion enhanced Spiking Neural Network (MFSNN). The MFSNN emulates the parallel processing and multiscale feature fusion seen in human visual perception to enable real-time, efficient, and energy-conserving neural signal decoding. Initially, the MFSNN employs temporal convolutional networks and channel attention mechanisms to extract spatiotemporal features from raw data. It then enhances decoding performance by integrating these features through skip connections. Additionally, the MFSNN improves generalizability and robustness in cross-day signal decoding through mini-batch supervised generalization learning. In two benchmark invasive BCI paradigms, including the single-hand grasp-and-touch and center-and-out reach tasks, the MFSNN surpasses traditional artificial neural network methods, such as MLP and GRU, in both accuracy and computational efficiency. Moreover, the MFSNN&#39;s multiscale feature fusion framework is well-suited for the implementation on neuromorphic chips, offering an energy-efficient solution for online decoding of invasive BCI signals. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03533v1-abstract-full').style.display = 'none'; document.getElementById('2410.03533v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.02378">arXiv:2410.02378</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.02378">pdf</a>, <a href="https://arxiv.org/format/2410.02378">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Towards Comprehensive Detection of Chinese Harmful Memes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Junyu Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xiaokun Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongbo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Haohao Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Dongyu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+L">Liang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Hongfei Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.02378v1-abstract-short" style="display: inline;"> This paper has been accepted in the NeurIPS 2024 D &amp; B Track. Harmful memes have proliferated on the Chinese Internet, while research on detecting Chinese harmful memes significantly lags behind due to the absence of reliable datasets and effective detectors. To this end, we focus on the comprehensive detection of Chinese harmful memes. We construct ToxiCN MM, the first Chinese harmful meme datase&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02378v1-abstract-full').style.display = 'inline'; document.getElementById('2410.02378v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.02378v1-abstract-full" style="display: none;"> This paper has been accepted in the NeurIPS 2024 D &amp; B Track. Harmful memes have proliferated on the Chinese Internet, while research on detecting Chinese harmful memes significantly lags behind due to the absence of reliable datasets and effective detectors. To this end, we focus on the comprehensive detection of Chinese harmful memes. We construct ToxiCN MM, the first Chinese harmful meme dataset, which consists of 12,000 samples with fine-grained annotations for various meme types. Additionally, we propose a baseline detector, Multimodal Knowledge Enhancement (MKE), incorporating contextual information of meme content generated by the LLM to enhance the understanding of Chinese memes. During the evaluation phase, we conduct extensive quantitative experiments and qualitative analyses on multiple baselines, including LLMs and our MKE. The experimental results indicate that detecting Chinese harmful memes is challenging for existing models while demonstrating the effectiveness of MKE. The resources for this paper are available at https://github.com/DUT-lujunyu/ToxiCN_MM. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02378v1-abstract-full').style.display = 'none'; document.getElementById('2410.02378v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.00938">arXiv:2410.00938</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.00938">pdf</a>, <a href="https://arxiv.org/format/2410.00938">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MoS: Unleashing Parameter Efficiency of Low-Rank Adaptation with Mixture of Shards </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Sheng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Liheng Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+P">Pengan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+J">Jingwei Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+B">Boyang Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+J">Jiyue Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Kong%2C+L">Lingpeng Kong</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+C">Chuan Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.00938v1-abstract-short" style="display: inline;"> The rapid scaling of large language models necessitates more lightweight finetuning methods to reduce the explosive GPU memory overhead when numerous customized models are served simultaneously. Targeting more parameter-efficient low-rank adaptation (LoRA), parameter sharing presents a promising solution. Empirically, our research into high-level sharing principles highlights the indispensable rol&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00938v1-abstract-full').style.display = 'inline'; document.getElementById('2410.00938v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.00938v1-abstract-full" style="display: none;"> The rapid scaling of large language models necessitates more lightweight finetuning methods to reduce the explosive GPU memory overhead when numerous customized models are served simultaneously. Targeting more parameter-efficient low-rank adaptation (LoRA), parameter sharing presents a promising solution. Empirically, our research into high-level sharing principles highlights the indispensable role of differentiation in reversing the detrimental effects of pure sharing. Guided by this finding, we propose Mixture of Shards (MoS), incorporating both inter-layer and intra-layer sharing schemes, and integrating four nearly cost-free differentiation strategies, namely subset selection, pair dissociation, vector sharding, and shard privatization. Briefly, it selects a designated number of shards from global pools with a Mixture-of-Experts (MoE)-like routing mechanism before sequentially concatenating them to low-rank matrices. Hence, it retains all the advantages of LoRA while offering enhanced parameter efficiency, and effectively circumvents the drawbacks of peer parameter-sharing methods. Our empirical experiments demonstrate approximately 8x parameter savings in a standard LoRA setting. The ablation study confirms the significance of each component. Our insights into parameter sharing and MoS method may illuminate future developments of more parameter-efficient finetuning methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00938v1-abstract-full').style.display = 'none'; document.getElementById('2410.00938v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.00503">arXiv:2410.00503</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.00503">pdf</a>, <a href="https://arxiv.org/format/2410.00503">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Drone Stereo Vision for Radiata Pine Branch Detection and Distance Measurement: Utilizing Deep Learning and YOLO Integration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Y">Yida Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+B">Bing Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+M">Mengjie Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Schofield%2C+S">Sam Schofield</a>, <a href="/search/cs?searchtype=author&amp;query=Green%2C+R">Richard Green</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.00503v2-abstract-short" style="display: inline;"> This research focuses on the development of a drone equipped with pruning tools and a stereo vision camera to accurately detect and measure the spatial positions of tree branches. YOLO is employed for branch segmentation, while two depth estimation approaches, monocular and stereo, are investigated. In comparison to SGBM, deep learning techniques produce more refined and accurate depth maps. In th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00503v2-abstract-full').style.display = 'inline'; document.getElementById('2410.00503v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.00503v2-abstract-full" style="display: none;"> This research focuses on the development of a drone equipped with pruning tools and a stereo vision camera to accurately detect and measure the spatial positions of tree branches. YOLO is employed for branch segmentation, while two depth estimation approaches, monocular and stereo, are investigated. In comparison to SGBM, deep learning techniques produce more refined and accurate depth maps. In the absence of ground-truth data, a fine-tuning process using deep neural networks is applied to approximate optimal depth values. This methodology facilitates precise branch detection and distance measurement, addressing critical challenges in the automation of pruning operations. The results demonstrate notable advancements in both accuracy and efficiency, underscoring the potential of deep learning to drive innovation and enhance automation in the agricultural sector. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00503v2-abstract-full').style.display = 'none'; document.getElementById('2410.00503v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.00361">arXiv:2410.00361</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.00361">pdf</a>, <a href="https://arxiv.org/format/2410.00361">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> PclGPT: A Large Language Model for Patronizing and Condescending Language Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongbo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Mingda Li</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Junyu Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+H">Hebin Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+L">Liang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+R">Ruizhu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Hongfei Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.00361v1-abstract-short" style="display: inline;"> Disclaimer: Samples in this paper may be harmful and cause discomfort! Patronizing and condescending language (PCL) is a form of speech directed at vulnerable groups. As an essential branch of toxic language, this type of language exacerbates conflicts and confrontations among Internet communities and detrimentally impacts disadvantaged groups. Traditional pre-trained language models (PLMs) perf&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00361v1-abstract-full').style.display = 'inline'; document.getElementById('2410.00361v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.00361v1-abstract-full" style="display: none;"> Disclaimer: Samples in this paper may be harmful and cause discomfort! Patronizing and condescending language (PCL) is a form of speech directed at vulnerable groups. As an essential branch of toxic language, this type of language exacerbates conflicts and confrontations among Internet communities and detrimentally impacts disadvantaged groups. Traditional pre-trained language models (PLMs) perform poorly in detecting PCL due to its implicit toxicity traits like hypocrisy and false sympathy. With the rise of large language models (LLMs), we can harness their rich emotional semantics to establish a paradigm for exploring implicit toxicity. In this paper, we introduce PclGPT, a comprehensive LLM benchmark designed specifically for PCL. We collect, annotate, and integrate the Pcl-PT/SFT dataset, and then develop a bilingual PclGPT-EN/CN model group through a comprehensive pre-training and supervised fine-tuning staircase process to facilitate implicit toxic detection. Group detection results and fine-grained detection from PclGPT and other models reveal significant variations in the degree of bias in PCL towards different vulnerable groups, necessitating increased societal attention to protect them. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00361v1-abstract-full').style.display = 'none'; document.getElementById('2410.00361v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for EMNLP2024 (Findings)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.19688">arXiv:2409.19688</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.19688">pdf</a>, <a href="https://arxiv.org/format/2409.19688">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Machine Learning for Raman Spectroscopy-based Cyber-Marine Fish Biochemical Composition Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yun Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+G">Gang Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+B">Bing Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+M">Mengjie Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Rooney%2C+J+S">Jeremy S. Rooney</a>, <a href="/search/cs?searchtype=author&amp;query=Lagutin%2C+K">Kirill Lagutin</a>, <a href="/search/cs?searchtype=author&amp;query=MacKenzie%2C+A">Andrew MacKenzie</a>, <a href="/search/cs?searchtype=author&amp;query=Gordon%2C+K+C">Keith C. Gordon</a>, <a href="/search/cs?searchtype=author&amp;query=Killeen%2C+D+P">Daniel P. Killeen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.19688v1-abstract-short" style="display: inline;"> The rapid and accurate detection of biochemical compositions in fish is a crucial real-world task that facilitates optimal utilization and extraction of high-value products in the seafood industry. Raman spectroscopy provides a promising solution for quickly and non-destructively analyzing the biochemical composition of fish by associating Raman spectra with biochemical reference data using machin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19688v1-abstract-full').style.display = 'inline'; document.getElementById('2409.19688v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.19688v1-abstract-full" style="display: none;"> The rapid and accurate detection of biochemical compositions in fish is a crucial real-world task that facilitates optimal utilization and extraction of high-value products in the seafood industry. Raman spectroscopy provides a promising solution for quickly and non-destructively analyzing the biochemical composition of fish by associating Raman spectra with biochemical reference data using machine learning regression models. This paper investigates different regression models to address this task and proposes a new design of Convolutional Neural Networks (CNNs) for jointly predicting water, protein, and lipids yield. To the best of our knowledge, we are the first to conduct a successful study employing CNNs to analyze the biochemical composition of fish based on a very small Raman spectroscopic dataset. Our approach combines a tailored CNN architecture with the comprehensive data preparation procedure, effectively mitigating the challenges posed by extreme data scarcity. The results demonstrate that our CNN can significantly outperform two state-of-the-art CNN models and multiple traditional machine learning models, paving the way for accurate and automated analysis of fish biochemical composition. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19688v1-abstract-full').style.display = 'none'; document.getElementById('2409.19688v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.18486">arXiv:2409.18486</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.18486">pdf</a>, <a href="https://arxiv.org/format/2409.18486">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Evaluation of OpenAI o1: Opportunities and Challenges of AGI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhong%2C+T">Tianyang Zhong</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhengliang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+Y">Yi Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yutong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yifan Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+S">Shizhe Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zihao Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+Y">Yanjun Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=Shu%2C+P">Peng Shu</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+X">Xiaowei Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+C">Chao Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+H">Hanqi Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Hanxu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yiwei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Junhao Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+H">Huawen Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yihen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+H">Huaqin Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+S">Shaochen Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+H">Haixing Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+L">Lin Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+R">Ruidong Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+W">Wei Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zhenyuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jingyuan Chen</a> , et al. (53 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.18486v1-abstract-short" style="display: inline;"> This comprehensive study evaluates the performance of OpenAI&#39;s o1-preview large language model across a diverse array of complex reasoning tasks, spanning multiple domains, including computer science, mathematics, natural sciences, medicine, linguistics, and social sciences. Through rigorous testing, o1-preview demonstrated remarkable capabilities, often achieving human-level or superior performan&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.18486v1-abstract-full').style.display = 'inline'; document.getElementById('2409.18486v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.18486v1-abstract-full" style="display: none;"> This comprehensive study evaluates the performance of OpenAI&#39;s o1-preview large language model across a diverse array of complex reasoning tasks, spanning multiple domains, including computer science, mathematics, natural sciences, medicine, linguistics, and social sciences. Through rigorous testing, o1-preview demonstrated remarkable capabilities, often achieving human-level or superior performance in areas ranging from coding challenges to scientific reasoning and from language processing to creative problem-solving. Key findings include: -83.3% success rate in solving complex competitive programming problems, surpassing many human experts. -Superior ability in generating coherent and accurate radiology reports, outperforming other evaluated models. -100% accuracy in high school-level mathematical reasoning tasks, providing detailed step-by-step solutions. -Advanced natural language inference capabilities across general and specialized domains like medicine. -Impressive performance in chip design tasks, outperforming specialized models in areas such as EDA script generation and bug analysis. -Remarkable proficiency in anthropology and geology, demonstrating deep understanding and reasoning in these specialized fields. -Strong capabilities in quantitative investing. O1 has comprehensive financial knowledge and statistical modeling skills. -Effective performance in social media analysis, including sentiment analysis and emotion recognition. The model excelled particularly in tasks requiring intricate reasoning and knowledge integration across various fields. While some limitations were observed, including occasional errors on simpler problems and challenges with certain highly specialized concepts, the overall results indicate significant progress towards artificial general intelligence. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.18486v1-abstract-full').style.display = 'none'; document.getElementById('2409.18486v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.17526">arXiv:2409.17526</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.17526">pdf</a>, <a href="https://arxiv.org/format/2409.17526">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Drone Stereo Vision for Radiata Pine Branch Detection and Distance Measurement: Integrating SGBM and Segmentation Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Y">Yida Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+B">Bing Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+M">Mengjie Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Schofield%2C+S">Sam Schofield</a>, <a href="/search/cs?searchtype=author&amp;query=Green%2C+R">Richard Green</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.17526v1-abstract-short" style="display: inline;"> Manual pruning of radiata pine trees presents significant safety risks due to their substantial height and the challenging terrains in which they thrive. To address these risks, this research proposes the development of a drone-based pruning system equipped with specialized pruning tools and a stereo vision camera, enabling precise detection and trimming of branches. Deep learning algorithms, incl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.17526v1-abstract-full').style.display = 'inline'; document.getElementById('2409.17526v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.17526v1-abstract-full" style="display: none;"> Manual pruning of radiata pine trees presents significant safety risks due to their substantial height and the challenging terrains in which they thrive. To address these risks, this research proposes the development of a drone-based pruning system equipped with specialized pruning tools and a stereo vision camera, enabling precise detection and trimming of branches. Deep learning algorithms, including YOLO and Mask R-CNN, are employed to ensure accurate branch detection, while the Semi-Global Matching algorithm is integrated to provide reliable distance estimation. The synergy between these techniques facilitates the precise identification of branch locations and enables efficient, targeted pruning. Experimental results demonstrate that the combined implementation of YOLO and SGBM enables the drone to accurately detect branches and measure their distances from the drone. This research not only improves the safety and efficiency of pruning operations but also makes a significant contribution to the advancement of drone technology in the automation of agricultural and forestry practices, laying a foundational framework for further innovations in environmental management. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.17526v1-abstract-full').style.display = 'none'; document.getElementById('2409.17526v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16788">arXiv:2409.16788</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.16788">pdf</a>, <a href="https://arxiv.org/format/2409.16788">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Mitigating the Bias of Large Language Model Evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+H">Hongli Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Hui Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Long%2C+Y">Yunfei Long</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+C">Conghui Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+H">Hailong Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+M">Muyun Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+T">Tiejun Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16788v1-abstract-short" style="display: inline;"> Recently, there has been a trend of evaluating the Large Language Model (LLM) quality in the flavor of LLM-as-a-Judge, namely leveraging another LLM to evaluate the current output quality. However, existing judges are proven to be biased, namely they would favor answers which present better superficial quality (such as verbosity, fluency) while ignoring the instruction following ability. In this w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16788v1-abstract-full').style.display = 'inline'; document.getElementById('2409.16788v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16788v1-abstract-full" style="display: none;"> Recently, there has been a trend of evaluating the Large Language Model (LLM) quality in the flavor of LLM-as-a-Judge, namely leveraging another LLM to evaluate the current output quality. However, existing judges are proven to be biased, namely they would favor answers which present better superficial quality (such as verbosity, fluency) while ignoring the instruction following ability. In this work, we propose systematic research about the bias of LLM-as-a-Judge. Specifically, for closed-source judge models, we apply calibration to mitigate the significance of superficial quality, both on probability level and prompt level. For open-source judge models, we propose to mitigate the bias by contrastive training, with curated negative samples that deviate from instruction but present better superficial quality. We apply our methods on the bias evaluation benchmark, and experiment results show our methods mitigate the bias by a large margin while maintaining a satisfactory evaluation accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16788v1-abstract-full').style.display = 'none'; document.getElementById('2409.16788v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.15866">arXiv:2409.15866</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.15866">pdf</a>, <a href="https://arxiv.org/format/2409.15866">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Multi-UAV Pursuit-Evasion with Online Planning in Unknown Environments by Deep Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jiayu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+C">Chao Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+G">Guosheng Li</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+W">Wenhao Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+X">Xinyi Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Botian Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+H">Huazhong Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yu Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.15866v2-abstract-short" style="display: inline;"> Multi-UAV pursuit-evasion, where pursuers aim to capture evaders, poses a key challenge for UAV swarm intelligence. Multi-agent reinforcement learning (MARL) has demonstrated potential in modeling cooperative behaviors, but most RL-based approaches remain constrained to simplified simulations with limited dynamics or fixed scenarios. Previous attempts to deploy RL policy to real-world pursuit-evas&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.15866v2-abstract-full').style.display = 'inline'; document.getElementById('2409.15866v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.15866v2-abstract-full" style="display: none;"> Multi-UAV pursuit-evasion, where pursuers aim to capture evaders, poses a key challenge for UAV swarm intelligence. Multi-agent reinforcement learning (MARL) has demonstrated potential in modeling cooperative behaviors, but most RL-based approaches remain constrained to simplified simulations with limited dynamics or fixed scenarios. Previous attempts to deploy RL policy to real-world pursuit-evasion are largely restricted to two-dimensional scenarios, such as ground vehicles or UAVs at fixed altitudes. In this paper, we address multi-UAV pursuit-evasion by considering UAV dynamics and physical constraints. We introduce an evader prediction-enhanced network to tackle partial observability in cooperative strategy learning. Additionally, we propose an adaptive environment generator within MARL training, enabling higher exploration efficiency and better policy generalization across diverse scenarios. Simulations show our method significantly outperforms all baselines in challenging scenarios, generalizing to unseen scenarios with a 100% capture rate. Finally, we derive a feasible policy via a two-stage reward refinement and deploy the policy on real quadrotors in a zero-shot manner. To our knowledge, this is the first work to derive and deploy an RL-based policy using collective thrust and body rates control commands for multi-UAV pursuit-evasion in unknown environments. The open-source code and videos are available at https://sites.google.com/view/pursuit-evasion-rl. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.15866v2-abstract-full').style.display = 'none'; document.getElementById('2409.15866v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.15375">arXiv:2409.15375</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.15375">pdf</a>, <a href="https://arxiv.org/format/2409.15375">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> DS2TA: Denoising Spiking Transformer with Attenuated Spatiotemporal Attention </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Boxun Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Geng%2C+H">Hejia Geng</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+Y">Yuxuan Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+P">Peng Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.15375v1-abstract-short" style="display: inline;"> Vision Transformers (ViT) are current high-performance models of choice for various vision applications. Recent developments have given rise to biologically inspired spiking transformers that thrive in ultra-low power operations on neuromorphic hardware, however, without fully unlocking the potential of spiking neural networks. We introduce DS2TA, a Denoising Spiking transformer with attenuated Sp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.15375v1-abstract-full').style.display = 'inline'; document.getElementById('2409.15375v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.15375v1-abstract-full" style="display: none;"> Vision Transformers (ViT) are current high-performance models of choice for various vision applications. Recent developments have given rise to biologically inspired spiking transformers that thrive in ultra-low power operations on neuromorphic hardware, however, without fully unlocking the potential of spiking neural networks. We introduce DS2TA, a Denoising Spiking transformer with attenuated SpatioTemporal Attention, designed specifically for vision applications. DS2TA introduces a new spiking attenuated spatiotemporal attention mechanism that considers input firing correlations occurring in both time and space, thereby fully harnessing the computational power of spiking neurons at the core of the transformer architecture. Importantly, DS2TA facilitates parameter-efficient spatiotemporal attention computation without introducing extra weights. DS2TA employs efficient hashmap-based nonlinear spiking attention denoisers to enhance the robustness and expressive power of spiking attention maps. DS2TA demonstrates state-of-the-art performances on several widely adopted static image and dynamic neuromorphic datasets. Operated over 4 time steps, DS2TA achieves 94.92% top-1 accuracy on CIFAR10 and 77.47% top-1 accuracy on CIFAR100, as well as 79.1% and 94.44% on CIFAR10-DVS and DVS-Gesture using 10 time steps. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.15375v1-abstract-full').style.display = 'none'; document.getElementById('2409.15375v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2311.09376</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.14617">arXiv:2409.14617</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.14617">pdf</a>, <a href="https://arxiv.org/format/2409.14617">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> Protein-Mamba: Biological Mamba Models for Protein Function Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bohao Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+Y">Yingzhou Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Inoue%2C+Y">Yoshitaka Inoue</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+N">Namkyeong Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+T">Tianfan Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jintai Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.14617v1-abstract-short" style="display: inline;"> Protein function prediction is a pivotal task in drug discovery, significantly impacting the development of effective and safe therapeutics. Traditional machine learning models often struggle with the complexity and variability inherent in predicting protein functions, necessitating more sophisticated approaches. In this work, we introduce Protein-Mamba, a novel two-stage model that leverages both&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14617v1-abstract-full').style.display = 'inline'; document.getElementById('2409.14617v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.14617v1-abstract-full" style="display: none;"> Protein function prediction is a pivotal task in drug discovery, significantly impacting the development of effective and safe therapeutics. Traditional machine learning models often struggle with the complexity and variability inherent in predicting protein functions, necessitating more sophisticated approaches. In this work, we introduce Protein-Mamba, a novel two-stage model that leverages both self-supervised learning and fine-tuning to improve protein function prediction. The pre-training stage allows the model to capture general chemical structures and relationships from large, unlabeled datasets, while the fine-tuning stage refines these insights using specific labeled datasets, resulting in superior prediction performance. Our extensive experiments demonstrate that Protein-Mamba achieves competitive performance, compared with a couple of state-of-the-art methods across a range of protein function datasets. This model&#39;s ability to effectively utilize both unlabeled and labeled data highlights the potential of self-supervised learning in advancing protein function prediction and offers a promising direction for future research in drug discovery. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14617v1-abstract-full').style.display = 'none'; document.getElementById('2409.14617v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.14019">arXiv:2409.14019</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.14019">pdf</a>, <a href="https://arxiv.org/format/2409.14019">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/LRA.2024.3466077">10.1109/LRA.2024.3466077 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> MOSE: Monocular Semantic Reconstruction Using NeRF-Lifted Noisy Priors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Du%2C+Z">Zhenhua Du</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Binbin Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Haoyu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Huo%2C+K">Kai Huo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhi%2C+S">Shuaifeng Zhi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.14019v1-abstract-short" style="display: inline;"> Accurately reconstructing dense and semantically annotated 3D meshes from monocular images remains a challenging task due to the lack of geometry guidance and imperfect view-dependent 2D priors. Though we have witnessed recent advancements in implicit neural scene representations enabling precise 2D rendering simply from multi-view images, there have been few works addressing 3D scene understandin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14019v1-abstract-full').style.display = 'inline'; document.getElementById('2409.14019v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.14019v1-abstract-full" style="display: none;"> Accurately reconstructing dense and semantically annotated 3D meshes from monocular images remains a challenging task due to the lack of geometry guidance and imperfect view-dependent 2D priors. Though we have witnessed recent advancements in implicit neural scene representations enabling precise 2D rendering simply from multi-view images, there have been few works addressing 3D scene understanding with monocular priors alone. In this paper, we propose MOSE, a neural field semantic reconstruction approach to lift inferred image-level noisy priors to 3D, producing accurate semantics and geometry in both 3D and 2D space. The key motivation for our method is to leverage generic class-agnostic segment masks as guidance to promote local consistency of rendered semantics during training. With the help of semantics, we further apply a smoothness regularization to texture-less regions for better geometric quality, thus achieving mutual benefits of geometry and semantics. Experiments on the ScanNet dataset show that our MOSE outperforms relevant baselines across all metrics on tasks of 3D semantic segmentation, 2D semantic segmentation and 3D surface reconstruction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14019v1-abstract-full').style.display = 'none'; document.getElementById('2409.14019v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13919">arXiv:2409.13919</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.13919">pdf</a>, <a href="https://arxiv.org/format/2409.13919">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Measuring Error Alignment for Decision-Making Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Binxia Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Bikakis%2C+A">Antonis Bikakis</a>, <a href="/search/cs?searchtype=author&amp;query=Onah%2C+D">Daniel Onah</a>, <a href="/search/cs?searchtype=author&amp;query=Vlachidis%2C+A">Andreas Vlachidis</a>, <a href="/search/cs?searchtype=author&amp;query=Dickens%2C+L">Luke Dickens</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13919v1-abstract-short" style="display: inline;"> Given that AI systems are set to play a pivotal role in future decision-making processes, their trustworthiness and reliability are of critical concern. Due to their scale and complexity, modern AI systems resist direct interpretation, and alternative ways are needed to establish trust in those systems, and determine how well they align with human values. We argue that good measures of the informa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13919v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13919v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13919v1-abstract-full" style="display: none;"> Given that AI systems are set to play a pivotal role in future decision-making processes, their trustworthiness and reliability are of critical concern. Due to their scale and complexity, modern AI systems resist direct interpretation, and alternative ways are needed to establish trust in those systems, and determine how well they align with human values. We argue that good measures of the information processing similarities between AI and humans, may be able to achieve these same ends. While Representational alignment (RA) approaches measure similarity between the internal states of two systems, the associated data can be expensive and difficult to collect for human systems. In contrast, Behavioural alignment (BA) comparisons are cheaper and easier, but questions remain as to their sensitivity and reliability. We propose two new behavioural alignment metrics misclassification agreement which measures the similarity between the errors of two systems on the same instances, and class-level error similarity which measures the similarity between the error distributions of two systems. We show that our metrics correlate well with RA metrics, and provide complementary information to another BA metric, within a range of domains, and set the scene for a new approach to value alignment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13919v1-abstract-full').style.display = 'none'; document.getElementById('2409.13919v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13741">arXiv:2409.13741</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.13741">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> Knowing When to Ask -- Bridging Large Language Models and Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Radhakrishnan%2C+P">Prashanth Radhakrishnan</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jennifer Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Ramaswami%2C+P">Prem Ramaswami</a>, <a href="/search/cs?searchtype=author&amp;query=Pho%2C+H">Hannah Pho</a>, <a href="/search/cs?searchtype=author&amp;query=Olmos%2C+A">Adriana Olmos</a>, <a href="/search/cs?searchtype=author&amp;query=Manyika%2C+J">James Manyika</a>, <a href="/search/cs?searchtype=author&amp;query=Guha%2C+R+V">R. V. Guha</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13741v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) are prone to generating factually incorrect information when responding to queries that involve numerical and statistical data or other timely facts. In this paper, we present an approach for enhancing the accuracy of LLMs by integrating them with Data Commons, a vast, open-source repository of public statistics from trusted organizations like the United Nations (UN),&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13741v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13741v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13741v1-abstract-full" style="display: none;"> Large Language Models (LLMs) are prone to generating factually incorrect information when responding to queries that involve numerical and statistical data or other timely facts. In this paper, we present an approach for enhancing the accuracy of LLMs by integrating them with Data Commons, a vast, open-source repository of public statistics from trusted organizations like the United Nations (UN), Center for Disease Control and Prevention (CDC) and global census bureaus. We explore two primary methods: Retrieval Interleaved Generation (RIG), where the LLM is trained to produce natural language queries to retrieve data from Data Commons, and Retrieval Augmented Generation (RAG), where relevant data tables are fetched from Data Commons and used to augment the LLM&#39;s prompt. We evaluate these methods on a diverse set of queries, demonstrating their effectiveness in improving the factual accuracy of LLM outputs. Our work represents an early step towards building more trustworthy and reliable LLMs that are grounded in verifiable statistical data and capable of complex factual reasoning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13741v1-abstract-full').style.display = 'none'; document.getElementById('2409.13741v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">39 pages - 25 page paper, 14 page Appendix, 7 figures, 9 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13730">arXiv:2409.13730</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.13730">pdf</a>, <a href="https://arxiv.org/format/2409.13730">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> VisScience: An Extensive Benchmark for Evaluating K12 Educational Multi-modal Scientific Reasoning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhihuan Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zhen Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jinhao Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+Z">Zhengxiao Du</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Weihan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bin Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+Y">Yuxiao Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+J">Jie Tang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13730v1-abstract-short" style="display: inline;"> Multi-modal large language models (MLLMs) have demonstrated promising capabilities across various tasks by integrating textual and visual information to achieve visual understanding in complex scenarios. Despite the availability of several benchmarks aims to evaluating MLLMs in tasks from visual question answering to complex problem-solving, most focus predominantly on mathematics or general visua&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13730v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13730v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13730v1-abstract-full" style="display: none;"> Multi-modal large language models (MLLMs) have demonstrated promising capabilities across various tasks by integrating textual and visual information to achieve visual understanding in complex scenarios. Despite the availability of several benchmarks aims to evaluating MLLMs in tasks from visual question answering to complex problem-solving, most focus predominantly on mathematics or general visual understanding tasks. This reveals a critical gap in current benchmarks, which often overlook the inclusion of other key scientific disciplines such as physics and chemistry. To address this gap, we meticulously construct a comprehensive benchmark, named VisScience, which is utilized to assess the multi-modal scientific reasoning across the three disciplines of mathematics, physics, and chemistry. This benchmark comprises 3,000 questions drawn from K12 education - spanning elementary school through high school - equally distributed across three disciplines, with 1,000 questions per discipline. The questions within VisScience span 21 distinct subjects and are categorized into five difficulty levels, offering a broad spectrum of topics within each discipline. With VisScience, we present a detailed evaluation of the performance of 25 representative MLLMs in scientific reasoning. Experimental results demonstrate that closed-source MLLMs generally outperform open-source models. The best performance observed include a 53.4\% accuracy in mathematics by Claude3.5-Sonnet, 38.2\% in physics by GPT-4o, and 47.0\% in chemistry by Gemini-1.5-Pro. These results underscore the strengths and limitations of MLLMs, suggesting areas for future improvement and highlighting the importance of developing models that can effectively handle the diverse demands of multi-modal scientific reasoning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13730v1-abstract-full').style.display = 'none'; document.getElementById('2409.13730v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">89 pages, 70 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13729">arXiv:2409.13729</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.13729">pdf</a>, <a href="https://arxiv.org/format/2409.13729">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> MathGLM-Vision: Solving Mathematical Problems with Multi-Modal Large Language Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zhen Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jinhao Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+Z">Zhengxiao Du</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+W">Wenmeng Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Weihan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+W">Wenyi Hong</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhihuan Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bin Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+Y">Yuxiao Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+J">Jie Tang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13729v1-abstract-short" style="display: inline;"> Large language models (LLMs) have demonstrated significant capabilities in mathematical reasoning, particularly with text-based mathematical problems. However, current multi-modal large language models (MLLMs), especially those specialized in mathematics, tend to focus predominantly on solving geometric problems but ignore the diversity of visual information available in other areas of mathematics&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13729v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13729v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13729v1-abstract-full" style="display: none;"> Large language models (LLMs) have demonstrated significant capabilities in mathematical reasoning, particularly with text-based mathematical problems. However, current multi-modal large language models (MLLMs), especially those specialized in mathematics, tend to focus predominantly on solving geometric problems but ignore the diversity of visual information available in other areas of mathematics. Moreover, the geometric information for these specialized mathematical MLLMs is derived from several public datasets, which are typically limited in diversity and complexity. To address these limitations, we aim to construct a fine-tuning dataset named MathVL, and develop a series of specialized mathematical MLLMs termed MathGLM-Vision by conducting Supervised Fine-Tuning (SFT) on MathVL with various parameter-scale backbones. To extensively evaluate the effectiveness of MathGLM-Vision, we conduct experiments on several public benchmarks and our curated MathVL-test consisting of 2,000 problems. Experimental results demonstrate that MathGLM-Vision achieves significant improvements compared with some existing models, including backbone models and open-source mathematical MLLMs. These findings indicate the importance of diversity dataset in enhancing the mathematical reasoning abilities of MLLMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13729v1-abstract-full').style.display = 'none'; document.getElementById('2409.13729v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 pages,19 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13712">arXiv:2409.13712</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.13712">pdf</a>, <a href="https://arxiv.org/format/2409.13712">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Good Idea or Not, Representation of LLM Could Tell </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yi Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+B">Bo Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Sheng%2C+S">Shuqian Sheng</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+C">Cheng Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+J">Jiaxin Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+Z">Zanwei Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+L">Luoyi Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xinbing Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+C">Chenghu Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13712v1-abstract-short" style="display: inline;"> In the ever-expanding landscape of academic research, the proliferation of ideas presents a significant challenge for researchers: discerning valuable ideas from the less impactful ones. The ability to efficiently evaluate the potential of these ideas is crucial for the advancement of science and paper review. In this work, we focus on idea assessment, which aims to leverage the knowledge of large&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13712v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13712v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13712v1-abstract-full" style="display: none;"> In the ever-expanding landscape of academic research, the proliferation of ideas presents a significant challenge for researchers: discerning valuable ideas from the less impactful ones. The ability to efficiently evaluate the potential of these ideas is crucial for the advancement of science and paper review. In this work, we focus on idea assessment, which aims to leverage the knowledge of large language models to assess the merit of scientific ideas. First, we investigate existing text evaluation research and define the problem of quantitative evaluation of ideas. Second, we curate and release a benchmark dataset from nearly four thousand manuscript papers with full texts, meticulously designed to train and evaluate the performance of different approaches to this task. Third, we establish a framework for quantifying the value of ideas by employing representations in a specific layer of large language models. Experimental results show that the scores predicted by our method are relatively consistent with those of humans. Our findings suggest that the representations of large language models hold more potential in quantifying the value of ideas than their generative outputs, demonstrating a promising avenue for automating the idea assessment process. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13712v1-abstract-full').style.display = 'none'; document.getElementById('2409.13712v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Xu%2C+B&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10