CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 90 results for author: <span class="mathjax">Jia, M</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Jia%2C+M">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Jia, M"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Jia%2C+M&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Jia, M"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Jia%2C+M&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Jia%2C+M&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Jia%2C+M&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16707">arXiv:2411.16707</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16707">pdf</a>, <a href="https://arxiv.org/format/2411.16707">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Enhancing LLMs for Power System Simulations: A Feedback-driven Multi-agent Framework </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengshuo Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+Z">Zeyu Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Hug%2C+G">Gabriela Hug</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16707v1-abstract-short" style="display: inline;"> The integration of experimental technologies with large language models (LLMs) is transforming scientific research, positioning AI as a versatile research assistant rather than a mere problem-solving tool. In the field of power systems, however, managing simulations -- one of the essential experimental technologies -- remains a challenge for LLMs due to their limited domain-specific knowledge, res&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16707v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16707v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16707v1-abstract-full" style="display: none;"> The integration of experimental technologies with large language models (LLMs) is transforming scientific research, positioning AI as a versatile research assistant rather than a mere problem-solving tool. In the field of power systems, however, managing simulations -- one of the essential experimental technologies -- remains a challenge for LLMs due to their limited domain-specific knowledge, restricted reasoning capabilities, and imprecise handling of simulation parameters. To address these limitations, we propose a feedback-driven, multi-agent framework that incorporates three proposed modules: an enhanced retrieval-augmented generation (RAG) module, an improved reasoning module, and a dynamic environmental acting module with an error-feedback mechanism. Validated on 69 diverse tasks from Daline and MATPOWER, this framework achieves success rates of 93.13% and 96.85%, respectively, significantly outperforming the latest LLMs (ChatGPT 4o and o1-preview), which achieved a 27.77% success rate on standard simulation tasks and 0% on complex tasks. Additionally, our framework also supports rapid, cost-effective task execution, completing each simulation in approximately 30 seconds at an average cost of 0.014 USD for tokens. Overall, this adaptable framework lays a foundation for developing intelligent LLM-based assistants for human researchers, facilitating power system research and beyond. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16707v1-abstract-full').style.display = 'none'; document.getElementById('2411.16707v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11350">arXiv:2411.11350</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11350">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Zero-Shot Load Forecasting with Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liao%2C+W">Wenlong Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zhe Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengshuo Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Rehtanz%2C+C">Christian Rehtanz</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+J">Jiannong Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Port%C3%A9-Agel%2C+F">Fernando Port茅-Agel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11350v1-abstract-short" style="display: inline;"> Deep learning models have shown strong performance in load forecasting, but they generally require large amounts of data for model training before being applied to new scenarios, which limits their effectiveness in data-scarce scenarios. Inspired by the great success of pre-trained language models (LLMs) in natural language processing, this paper proposes a zero-shot load forecasting approach usin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11350v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11350v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11350v1-abstract-full" style="display: none;"> Deep learning models have shown strong performance in load forecasting, but they generally require large amounts of data for model training before being applied to new scenarios, which limits their effectiveness in data-scarce scenarios. Inspired by the great success of pre-trained language models (LLMs) in natural language processing, this paper proposes a zero-shot load forecasting approach using an advanced LLM framework denoted as the Chronos model. By utilizing its extensive pre-trained knowledge, the Chronos model enables accurate load forecasting in data-scarce scenarios without the need for extensive data-specific training. Simulation results across five real-world datasets demonstrate that the Chronos model significantly outperforms nine popular baseline models for both deterministic and probabilistic load forecasting with various forecast horizons (e.g., 1 to 48 hours), even though the Chronos model is neither tailored nor fine-tuned to these specific load datasets. Notably, Chronos reduces root mean squared error (RMSE), continuous ranked probability score (CRPS), and quantile score (QS) by approximately 7.34%-84.30%, 19.63%-60.06%, and 22.83%-54.49%, respectively, compared to baseline models. These results highlight the superiority and flexibility of the Chronos model, positioning it as an effective solution in data-scarce scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11350v1-abstract-full').style.display = 'none'; document.getElementById('2411.11350v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages,5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02397">arXiv:2411.02397</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02397">pdf</a>, <a href="https://arxiv.org/format/2411.02397">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Adaptive Caching for Faster Video Generation with Diffusion Transformers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kahatapitiya%2C+K">Kumara Kahatapitiya</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Haozhe Liu</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+S">Sen He</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+D">Ding Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Menglin Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chenyang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ryoo%2C+M+S">Michael S. Ryoo</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+T">Tian Xie</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02397v2-abstract-short" style="display: inline;"> Generating temporally-consistent high-fidelity videos can be computationally expensive, especially over longer temporal spans. More-recent Diffusion Transformers (DiTs) -- despite making significant headway in this context -- have only heightened such challenges as they rely on larger models and heavier attention mechanisms, resulting in slower inference speeds. In this paper, we introduce a train&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02397v2-abstract-full').style.display = 'inline'; document.getElementById('2411.02397v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02397v2-abstract-full" style="display: none;"> Generating temporally-consistent high-fidelity videos can be computationally expensive, especially over longer temporal spans. More-recent Diffusion Transformers (DiTs) -- despite making significant headway in this context -- have only heightened such challenges as they rely on larger models and heavier attention mechanisms, resulting in slower inference speeds. In this paper, we introduce a training-free method to accelerate video DiTs, termed Adaptive Caching (AdaCache), which is motivated by the fact that &#34;not all videos are created equal&#34;: meaning, some videos require fewer denoising steps to attain a reasonable quality than others. Building on this, we not only cache computations through the diffusion process, but also devise a caching schedule tailored to each video generation, maximizing the quality-latency trade-off. We further introduce a Motion Regularization (MoReg) scheme to utilize video information within AdaCache, essentially controlling the compute allocation based on motion content. Altogether, our plug-and-play contributions grant significant inference speedups (e.g. up to 4.7x on Open-Sora 720p - 2s video generation) without sacrificing the generation quality, across multiple video DiT baselines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02397v2-abstract-full').style.display = 'none'; document.getElementById('2411.02397v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project-page is available at https://adacache-dit.github.io</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.22108">arXiv:2410.22108</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.22108">pdf</a>, <a href="https://arxiv.org/format/2410.22108">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Protecting Privacy in Multimodal Large Language Models with MLLMU-Bench </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zheyuan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Dou%2C+G">Guangyao Dou</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+Z">Zhaoxuan Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Zeng%2C+Q">Qingkai Zeng</a>, <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+Y">Yongle Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+M">Meng Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.22108v1-abstract-short" style="display: inline;"> Generative models such as Large Language Models (LLM) and Multimodal Large Language models (MLLMs) trained on massive web corpora can memorize and disclose individuals&#39; confidential and private data, raising legal and ethical concerns. While many previous works have addressed this issue in LLM via machine unlearning, it remains largely unexplored for MLLMs. To tackle this challenge, we introduce M&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22108v1-abstract-full').style.display = 'inline'; document.getElementById('2410.22108v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.22108v1-abstract-full" style="display: none;"> Generative models such as Large Language Models (LLM) and Multimodal Large Language models (MLLMs) trained on massive web corpora can memorize and disclose individuals&#39; confidential and private data, raising legal and ethical concerns. While many previous works have addressed this issue in LLM via machine unlearning, it remains largely unexplored for MLLMs. To tackle this challenge, we introduce Multimodal Large Language Model Unlearning Benchmark (MLLMU-Bench), a novel benchmark aimed at advancing the understanding of multimodal machine unlearning. MLLMU-Bench consists of 500 fictitious profiles and 153 profiles for public celebrities, each profile feature over 14 customized question-answer pairs, evaluated from both multimodal (image+text) and unimodal (text) perspectives. The benchmark is divided into four sets to assess unlearning algorithms in terms of efficacy, generalizability, and model utility. Finally, we provide baseline results using existing generative model unlearning algorithms. Surprisingly, our experiments show that unimodal unlearning algorithms excel in generation and cloze tasks, while multimodal unlearning approaches perform better in classification tasks with multimodal inputs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22108v1-abstract-full').style.display = 'none'; document.getElementById('2410.22108v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20280">arXiv:2410.20280</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.20280">pdf</a>, <a href="https://arxiv.org/format/2410.20280">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> MarDini: Masked Autoregressive Diffusion for Video Generation at Scale </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Haozhe Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shikun Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Z">Zijian Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+M">Mengmeng Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Y">Yanping Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+X">Xiao Han</a>, <a href="/search/cs?searchtype=author&amp;query=P%C3%A9rez%2C+J+C">Juan C. P茅rez</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+D">Ding Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Kahatapitiya%2C+K">Kumara Kahatapitiya</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Menglin Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+J">Jui-Chieh Wu</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+S">Sen He</a>, <a href="/search/cs?searchtype=author&amp;query=Xiang%2C+T">Tao Xiang</a>, <a href="/search/cs?searchtype=author&amp;query=Schmidhuber%2C+J">J眉rgen Schmidhuber</a>, <a href="/search/cs?searchtype=author&amp;query=P%C3%A9rez-R%C3%BAa%2C+J">Juan-Manuel P茅rez-R煤a</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20280v1-abstract-short" style="display: inline;"> We introduce MarDini, a new family of video diffusion models that integrate the advantages of masked auto-regression (MAR) into a unified diffusion model (DM) framework. Here, MAR handles temporal planning, while DM focuses on spatial generation in an asymmetric network design: i) a MAR-based planning model containing most of the parameters generates planning signals for each masked frame using lo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20280v1-abstract-full').style.display = 'inline'; document.getElementById('2410.20280v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20280v1-abstract-full" style="display: none;"> We introduce MarDini, a new family of video diffusion models that integrate the advantages of masked auto-regression (MAR) into a unified diffusion model (DM) framework. Here, MAR handles temporal planning, while DM focuses on spatial generation in an asymmetric network design: i) a MAR-based planning model containing most of the parameters generates planning signals for each masked frame using low-resolution input; ii) a lightweight generation model uses these signals to produce high-resolution frames via diffusion de-noising. MarDini&#39;s MAR enables video generation conditioned on any number of masked frames at any frame positions: a single model can handle video interpolation (e.g., masking middle frames), image-to-video generation (e.g., masking from the second frame onward), and video expansion (e.g., masking half the frames). The efficient design allocates most of the computational resources to the low-resolution planning model, making computationally expensive but important spatio-temporal attention feasible at scale. MarDini sets a new state-of-the-art for video interpolation; meanwhile, within few inference steps, it efficiently generates videos on par with those of much more expensive advanced image-to-video models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20280v1-abstract-full').style.display = 'none'; document.getElementById('2410.20280v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project Page: https://mardini-vidgen.github.io</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.14684">arXiv:2410.14684</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.14684">pdf</a>, <a href="https://arxiv.org/format/2410.14684">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> RepoGraph: Enhancing AI Software Engineering with Repository-level Code Graph </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ouyang%2C+S">Siru Ouyang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+W">Wenhao Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+K">Kaixin Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+Z">Zilin Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+J">Jiawei Han</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hongming Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+D">Dong Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.14684v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) excel in code generation yet struggle with modern AI software engineering tasks. Unlike traditional function-level or file-level coding tasks, AI software engineering requires not only basic coding proficiency but also advanced skills in managing and interacting with code repositories. However, existing methods often overlook the need for repository-level code understa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14684v1-abstract-full').style.display = 'inline'; document.getElementById('2410.14684v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.14684v1-abstract-full" style="display: none;"> Large Language Models (LLMs) excel in code generation yet struggle with modern AI software engineering tasks. Unlike traditional function-level or file-level coding tasks, AI software engineering requires not only basic coding proficiency but also advanced skills in managing and interacting with code repositories. However, existing methods often overlook the need for repository-level code understanding, which is crucial for accurately grasping the broader context and developing effective solutions. On this basis, we present RepoGraph, a plug-in module that manages a repository-level structure for modern AI software engineering solutions. RepoGraph offers the desired guidance and serves as a repository-wide navigation for AI software engineers. We evaluate RepoGraph on the SWE-bench by plugging it into four different methods of two lines of approaches, where RepoGraph substantially boosts the performance of all systems, leading to a new state-of-the-art among open-source frameworks. Our analyses also demonstrate the extensibility and flexibility of RepoGraph by testing on another repo-level coding benchmark, CrossCodeEval. Our code is available at https://github.com/ozyyshr/RepoGraph. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14684v1-abstract-full').style.display = 'none'; document.getElementById('2410.14684v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Work in progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.14425">arXiv:2410.14425</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.14425">pdf</a>, <a href="https://arxiv.org/format/2410.14425">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Unlearning Backdoor Attacks for LLMs with Weak-to-Strong Knowledge Distillation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+S">Shuai Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+X">Xiaobao Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+C">Cong-Duy Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Meihuizi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+Y">Yichao Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Tuan%2C+L+A">Luu Anh Tuan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.14425v1-abstract-short" style="display: inline;"> Parameter-efficient fine-tuning (PEFT) can bridge the gap between large language models (LLMs) and downstream tasks. However, PEFT has been proven vulnerable to malicious attacks. Research indicates that poisoned LLMs, even after PEFT, retain the capability to activate internalized backdoors when input samples contain predefined triggers. In this paper, we introduce a novel weak-to-strong unlearni&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14425v1-abstract-full').style.display = 'inline'; document.getElementById('2410.14425v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.14425v1-abstract-full" style="display: none;"> Parameter-efficient fine-tuning (PEFT) can bridge the gap between large language models (LLMs) and downstream tasks. However, PEFT has been proven vulnerable to malicious attacks. Research indicates that poisoned LLMs, even after PEFT, retain the capability to activate internalized backdoors when input samples contain predefined triggers. In this paper, we introduce a novel weak-to-strong unlearning algorithm to defend against backdoor attacks based on feature alignment knowledge distillation, named W2SDefense. Specifically, we first train a small-scale language model through full-parameter fine-tuning to serve as the clean teacher model. Then, this teacher model guides the large-scale poisoned student model in unlearning the backdoor, leveraging PEFT. Theoretical analysis suggests that W2SDefense has the potential to enhance the student model&#39;s ability to unlearn backdoor features, preventing the activation of the backdoor. We conduct experiments on text classification tasks involving three state-of-the-art language models and three different backdoor attack algorithms. Our empirical results demonstrate the outstanding performance of W2SDefense in defending against backdoor attacks without compromising model performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14425v1-abstract-full').style.display = 'none'; document.getElementById('2410.14425v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.14179">arXiv:2410.14179</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.14179">pdf</a>, <a href="https://arxiv.org/format/2410.14179">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MultiChartQA: Benchmarking Vision-Language Models on Multi-Chart Problems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+Z">Zifeng Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Lang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+M">Meng Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.14179v1-abstract-short" style="display: inline;"> Multimodal Large Language Models (MLLMs) have demonstrated impressive abilities across various tasks, including visual question answering and chart comprehension, yet existing benchmarks for chart-related tasks fall short in capturing the complexity of real-world multi-chart scenarios. Current benchmarks primarily focus on single-chart tasks, neglecting the multi-hop reasoning required to extract&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14179v1-abstract-full').style.display = 'inline'; document.getElementById('2410.14179v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.14179v1-abstract-full" style="display: none;"> Multimodal Large Language Models (MLLMs) have demonstrated impressive abilities across various tasks, including visual question answering and chart comprehension, yet existing benchmarks for chart-related tasks fall short in capturing the complexity of real-world multi-chart scenarios. Current benchmarks primarily focus on single-chart tasks, neglecting the multi-hop reasoning required to extract and integrate information from multiple charts, which is essential in practical applications. To fill this gap, we introduce MultiChartQA, a benchmark that evaluates MLLMs&#39; capabilities in four key areas: direct question answering, parallel question answering, comparative reasoning, and sequential reasoning. Our evaluation of a wide range of MLLMs reveals significant performance gaps compared to humans. These results highlight the challenges in multi-chart comprehension and the potential of MultiChartQA to drive advancements in this field. Our code and data are available at https://github.com/Zivenzhu/Multi-chart-QA <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14179v1-abstract-full').style.display = 'none'; document.getElementById('2410.14179v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.01744">arXiv:2410.01744</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.01744">pdf</a>, <a href="https://arxiv.org/format/2410.01744">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Leopard: A Vision Language Model For Text-Rich Multi-Image Tasks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+W">Wenhao Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+K">Kaixin Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+T">Tianqing Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ouyang%2C+S">Siru Ouyang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hongming Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+M">Meng Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+D">Dong Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.01744v2-abstract-short" style="display: inline;"> Text-rich images, where text serves as the central visual element guiding the overall understanding, are prevalent in real-world applications, such as presentation slides, scanned documents, and webpage snapshots. Tasks involving multiple text-rich images are especially challenging, as they require not only understanding the content of individual images but reasoning about inter-relationships and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.01744v2-abstract-full').style.display = 'inline'; document.getElementById('2410.01744v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.01744v2-abstract-full" style="display: none;"> Text-rich images, where text serves as the central visual element guiding the overall understanding, are prevalent in real-world applications, such as presentation slides, scanned documents, and webpage snapshots. Tasks involving multiple text-rich images are especially challenging, as they require not only understanding the content of individual images but reasoning about inter-relationships and logical flows across multiple visual inputs. Despite the importance of these scenarios, current multimodal large language models (MLLMs) struggle to handle such tasks due to two key challenges: (1) the scarcity of high-quality instruction tuning datasets for text-rich multi-image scenarios, and (2) the difficulty in balancing image resolution with visual feature sequence length. To address these challenges, we propose Leopard, a MLLM designed specifically for handling vision-language tasks involving multiple text-rich images. First, we curated about one million high-quality multimodal instruction-tuning data, tailored to text-rich, multi-image scenarios. Second, we developed an adaptive high-resolution multi-image encoding module to dynamically optimize the allocation of visual sequence length based on the original aspect ratios and resolutions of the input images. Experiments across a wide range of benchmarks demonstrate our model&#39;s superior capabilities in text-rich, multi-image evaluations and competitive performance in general domain evaluations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.01744v2-abstract-full').style.display = 'none'; document.getElementById('2410.01744v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Our code is available at https://github.com/Jill0001/Leopard</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.05212">arXiv:2409.05212</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.05212">pdf</a>, <a href="https://arxiv.org/format/2409.05212">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> SS-BRPE: Self-Supervised Blind Room Parameter Estimation Using Attention Mechanisms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chunxi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Maoshen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Meiran Li</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+C">Changchun Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+W">Wenyu Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.05212v1-abstract-short" style="display: inline;"> In recent years, dynamic parameterization of acoustic environments has garnered attention in audio processing. This focus includes room volume and reverberation time (RT60), which define local acoustics independent of sound source and receiver orientation. Previous studies show that purely attention-based models can achieve advanced results in room parameter estimation. However, their success reli&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.05212v1-abstract-full').style.display = 'inline'; document.getElementById('2409.05212v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.05212v1-abstract-full" style="display: none;"> In recent years, dynamic parameterization of acoustic environments has garnered attention in audio processing. This focus includes room volume and reverberation time (RT60), which define local acoustics independent of sound source and receiver orientation. Previous studies show that purely attention-based models can achieve advanced results in room parameter estimation. However, their success relies on supervised pretrainings that require a large amount of labeled true values for room parameters and complex training pipelines. In light of this, we propose a novel Self-Supervised Blind Room Parameter Estimation (SS-BRPE) system. This system combines a purely attention-based model with self-supervised learning to estimate room acoustic parameters, from single-channel noisy speech signals. By utilizing unlabeled audio data for pretraining, the proposed system significantly reduces dependencies on costly labeled datasets. Our model also incorporates dynamic feature augmentation during fine-tuning to enhance adaptability and generalizability. Experimental results demonstrate that the SS-BRPE system not only achieves more superior performance in estimating room parameters than state-of-the-art (SOTA) methods but also effectively maintains high accuracy under conditions with limited labeled data. Code available at https://github.com/bjut-chunxiwang/SS-BRPE. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.05212v1-abstract-full').style.display = 'none'; document.getElementById('2409.05212v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 3 figures, submitted to ICASSP 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.04834">arXiv:2409.04834</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.04834">pdf</a>, <a href="https://arxiv.org/format/2409.04834">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3674805.3695403">10.1145/3674805.3695403 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Reducing Events to Augment Log-based Anomaly Detection Models: An Empirical Study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lingzhe Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+T">Tong Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+K">Kangjin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengxi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Yong%2C+Y">Yang Yong</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Ying Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.04834v2-abstract-short" style="display: inline;"> As software systems grow increasingly intricate, the precise detection of anomalies have become both essential and challenging. Current log-based anomaly detection methods depend heavily on vast amounts of log data leading to inefficient inference and potential misguidance by noise logs. However, the quantitative effects of log reduction on the effectiveness of anomaly detection remain unexplored.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.04834v2-abstract-full').style.display = 'inline'; document.getElementById('2409.04834v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.04834v2-abstract-full" style="display: none;"> As software systems grow increasingly intricate, the precise detection of anomalies have become both essential and challenging. Current log-based anomaly detection methods depend heavily on vast amounts of log data leading to inefficient inference and potential misguidance by noise logs. However, the quantitative effects of log reduction on the effectiveness of anomaly detection remain unexplored. Therefore, we first conduct a comprehensive study on six distinct models spanning three datasets. Through the study, the impact of log quantity and their effectiveness in representing anomalies is qualifies, uncovering three distinctive log event types that differently influence model performance. Drawing from these insights, we propose LogCleaner: an efficient methodology for the automatic reduction of log events in the context of anomaly detection. Serving as middleware between software systems and models, LogCleaner continuously updates and filters anti-events and duplicative-events in the raw generated logs. Experimental outcomes highlight LogCleaner&#39;s capability to reduce over 70% of log events in anomaly detection, accelerating the model&#39;s inference speed by approximately 300%, and universally improving the performance of models for anomaly detection. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.04834v2-abstract-full').style.display = 'none'; document.getElementById('2409.04834v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted By ESEM&#39;24</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.07773">arXiv:2408.07773</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.07773">pdf</a>, <a href="https://arxiv.org/format/2408.07773">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MedTsLLM: Leveraging LLMs for Multimodal Medical Time Series Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chan%2C+N">Nimeesha Chan</a>, <a href="/search/cs?searchtype=author&amp;query=Parker%2C+F">Felix Parker</a>, <a href="/search/cs?searchtype=author&amp;query=Bennett%2C+W">William Bennett</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+T">Tianyi Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M+Y">Mung Yao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Fackler%2C+J">James Fackler</a>, <a href="/search/cs?searchtype=author&amp;query=Ghobadi%2C+K">Kimia Ghobadi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.07773v1-abstract-short" style="display: inline;"> The complexity and heterogeneity of data in many real-world applications pose significant challenges for traditional machine learning and signal processing techniques. For instance, in medicine, effective analysis of diverse physiological signals is crucial for patient monitoring and clinical decision-making and yet highly challenging. We introduce MedTsLLM, a general multimodal large language mod&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07773v1-abstract-full').style.display = 'inline'; document.getElementById('2408.07773v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.07773v1-abstract-full" style="display: none;"> The complexity and heterogeneity of data in many real-world applications pose significant challenges for traditional machine learning and signal processing techniques. For instance, in medicine, effective analysis of diverse physiological signals is crucial for patient monitoring and clinical decision-making and yet highly challenging. We introduce MedTsLLM, a general multimodal large language model (LLM) framework that effectively integrates time series data and rich contextual information in the form of text to analyze physiological signals, performing three tasks with clinical relevance: semantic segmentation, boundary detection, and anomaly detection in time series. These critical tasks enable deeper analysis of physiological signals and can provide actionable insights for clinicians. We utilize a reprogramming layer to align embeddings of time series patches with a pretrained LLM&#39;s embedding space and make effective use of raw time series, in conjunction with textual context. Given the multivariate nature of medical datasets, we develop methods to handle multiple covariates. We additionally tailor the text prompt to include patient-specific information. Our model outperforms state-of-the-art baselines, including deep learning models, other LLMs, and clinical methods across multiple medical domains, specifically electrocardiograms and respiratory waveforms. MedTsLLM presents a promising step towards harnessing the power of LLMs for medical time series analysis that can elevate data-driven tools for clinicians and improve patient outcomes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07773v1-abstract-full').style.display = 'none'; document.getElementById('2408.07773v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">published in Proceedings of Machine Learning Research, MLHC 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.08515">arXiv:2407.08515</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.08515">pdf</a>, <a href="https://arxiv.org/format/2407.08515">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> 15M Multimodal Facial Image-Text Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dai%2C+D">Dawei Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">YuTang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">YingGe Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingming Jia</a>, <a href="/search/cs?searchtype=author&amp;query=YuanHui%2C+Z">Zhang YuanHui</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+G">Guoyin Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.08515v2-abstract-short" style="display: inline;"> Currently, image-text-driven multi-modal deep learning models have demonstrated their outstanding potential in many fields. In practice, tasks centered around facial images have broad application prospects. This paper presents \textbf{FaceCaption-15M}, a large-scale, diverse, and high-quality dataset of facial images accompanied by their natural language descriptions (facial image-to-text). This d&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.08515v2-abstract-full').style.display = 'inline'; document.getElementById('2407.08515v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.08515v2-abstract-full" style="display: none;"> Currently, image-text-driven multi-modal deep learning models have demonstrated their outstanding potential in many fields. In practice, tasks centered around facial images have broad application prospects. This paper presents \textbf{FaceCaption-15M}, a large-scale, diverse, and high-quality dataset of facial images accompanied by their natural language descriptions (facial image-to-text). This dataset aims to facilitate a study on face-centered tasks. FaceCaption-15M comprises over 15 million pairs of facial images and their corresponding natural language descriptions of facial features, making it the largest facial image-caption dataset to date. We conducted a comprehensive analysis of image quality, text naturalness, text complexity, and text-image relevance to demonstrate the superiority of FaceCaption-15M. To validate the effectiveness of FaceCaption-15M, we first trained a facial language-image pre-training model (FLIP, similar to CLIP) to align facial image with its corresponding captions in feature space. Subsequently, using both image and text encoders and fine-tuning only the linear layer, our FLIP-based models achieved state-of-the-art results on two challenging face-centered tasks. The purpose is to promote research in the field of face-related tasks through the availability of the proposed FaceCaption-15M dataset. All data, codes, and models are publicly available. https://huggingface.co/datasets/OpenFace-CQUPT/FaceCaption-15M <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.08515v2-abstract-full').style.display = 'none'; document.getElementById('2407.08515v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.02501">arXiv:2407.02501</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.02501">pdf</a>, <a href="https://arxiv.org/format/2407.02501">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Engineering, Finance, and Science">cs.CE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applications">stat.AP</span> </div> </div> <p class="title is-5 mathjax"> Data-driven Power Flow Linearization: Theory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengshuo Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Hug%2C+G">Gabriela Hug</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+N">Ning Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhaojian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Kang%2C+C">Chongqing Kang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.02501v1-abstract-short" style="display: inline;"> This two-part tutorial dives into the field of data-driven power flow linearization (DPFL), a domain gaining increased attention. DPFL stands out for its higher approximation accuracy, wide adaptability, and better ability to implicitly incorporate the latest system attributes. This renders DPFL a potentially superior option for managing the significant fluctuations from renewable energy sources,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.02501v1-abstract-full').style.display = 'inline'; document.getElementById('2407.02501v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.02501v1-abstract-full" style="display: none;"> This two-part tutorial dives into the field of data-driven power flow linearization (DPFL), a domain gaining increased attention. DPFL stands out for its higher approximation accuracy, wide adaptability, and better ability to implicitly incorporate the latest system attributes. This renders DPFL a potentially superior option for managing the significant fluctuations from renewable energy sources, a step towards realizing a more sustainable energy future, by translating the higher model accuracy into increased economic efficiency and less energy losses. To conduct a deep and rigorous reexamination, this tutorial first classifies existing DPFL methods into DPFL training algorithms and supportive techniques. Their mathematical models, analytical solutions, capabilities, limitations, and generalizability are systematically examined, discussed, and summarized. In addition, this tutorial reviews existing DPFL experiments, examining the settings of test systems, the fidelity of datasets, and the comparison made among a limited number of DPFL methods. Further, this tutorial implements extensive numerical comparisons of all existing DPFL methods (40 methods in total) and four classic physics-driven approaches, focusing on their generalizability, applicability, accuracy, and computational efficiency. Through these simulationmethodss, this tutorial aims to reveal the actual performance of all the methods (including the performances exposed to data noise or outliers), guiding the selection of appropriate linearization methods. Furthermore, this tutorial discusses future directions based on the theoretical and numerical insights gained. As the first part, this paper reexamines DPFL theories, covering all the training algorithms and supportive techniques. Capabilities, limitations, and aspects of generalizability, which were previously unmentioned in the literature, have been identified. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.02501v1-abstract-full').style.display = 'none'; document.getElementById('2407.02501v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.17215">arXiv:2406.17215</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.17215">pdf</a>, <a href="https://arxiv.org/format/2406.17215">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Enabling Large Language Models to Perform Power System Simulations with Previously Unseen Tools: A Case of Daline </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengshuo Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+Z">Zeyu Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Hug%2C+G">Gabriela Hug</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.17215v3-abstract-short" style="display: inline;"> The integration of experiment technologies with large language models (LLMs) is transforming scientific research, offering AI capabilities beyond specialized problem-solving to becoming research assistants for human scientists. In power systems, simulations are essential for research. However, LLMs face significant challenges in power system simulations due to limited pre-existing knowledge and th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.17215v3-abstract-full').style.display = 'inline'; document.getElementById('2406.17215v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.17215v3-abstract-full" style="display: none;"> The integration of experiment technologies with large language models (LLMs) is transforming scientific research, offering AI capabilities beyond specialized problem-solving to becoming research assistants for human scientists. In power systems, simulations are essential for research. However, LLMs face significant challenges in power system simulations due to limited pre-existing knowledge and the complexity of power grids. To address this issue, this work proposes a modular framework that integrates expertise from both the power system and LLM domains. This framework enhances LLMs&#39; ability to perform power system simulations on previously unseen tools. Validated using 34 simulation tasks in Daline, a (optimal) power flow simulation and linearization toolbox not yet exposed to LLMs, the proposed framework improved GPT-4o&#39;s simulation coding accuracy from 0% to 96.07%, also outperforming the ChatGPT-4o web interface&#39;s 33.8% accuracy (with the entire knowledge base uploaded). These results highlight the potential of LLMs as research assistants in power systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.17215v3-abstract-full').style.display = 'none'; document.getElementById('2406.17215v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.15677">arXiv:2406.15677</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.15677">pdf</a>, <a href="https://arxiv.org/format/2406.15677">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Open-vocabulary Pick and Place via Patch-level Semantic Maps </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingxi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Haojie Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhewen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chenghao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+L">Linfeng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+D">Dian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J+X">Jason Xinyu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Walters%2C+R">Robin Walters</a>, <a href="/search/cs?searchtype=author&amp;query=Platt%2C+R">Robert Platt</a>, <a href="/search/cs?searchtype=author&amp;query=Tellex%2C+S">Stefanie Tellex</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.15677v1-abstract-short" style="display: inline;"> Controlling robots through natural language instructions in open-vocabulary scenarios is pivotal for enhancing human-robot collaboration and complex robot behavior synthesis. However, achieving this capability poses significant challenges due to the need for a system that can generalize from limited data to a wide range of tasks and environments. Existing methods rely on large, costly datasets and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.15677v1-abstract-full').style.display = 'inline'; document.getElementById('2406.15677v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.15677v1-abstract-full" style="display: none;"> Controlling robots through natural language instructions in open-vocabulary scenarios is pivotal for enhancing human-robot collaboration and complex robot behavior synthesis. However, achieving this capability poses significant challenges due to the need for a system that can generalize from limited data to a wide range of tasks and environments. Existing methods rely on large, costly datasets and struggle with generalization. This paper introduces Grounded Equivariant Manipulation (GEM), a novel approach that leverages the generative capabilities of pre-trained vision-language models and geometric symmetries to facilitate few-shot and zero-shot learning for open-vocabulary robot manipulation tasks. Our experiments demonstrate GEM&#39;s high sample efficiency and superior generalization across diverse pick-and-place tasks in both simulation and real-world experiments, showcasing its ability to adapt to novel instructions and unseen objects with minimal data requirements. GEM advances a significant step forward in the domain of language-conditioned robot control, bridging the gap between semantic understanding and action generation in robotic systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.15677v1-abstract-full').style.display = 'none'; document.getElementById('2406.15677v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.14326">arXiv:2406.14326</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.14326">pdf</a>, <a href="https://arxiv.org/format/2406.14326">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> medIKAL: Integrating Knowledge Graphs as Assistants of LLMs for Enhanced Clinical Diagnosis on EMRs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingyi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+J">Junwen Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Y">Yan Song</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jianxin Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.14326v1-abstract-short" style="display: inline;"> Electronic Medical Records (EMRs), while integral to modern healthcare, present challenges for clinical reasoning and diagnosis due to their complexity and information redundancy. To address this, we proposed medIKAL (Integrating Knowledge Graphs as Assistants of LLMs), a framework that combines Large Language Models (LLMs) with knowledge graphs (KGs) to enhance diagnostic capabilities. medIKAL as&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.14326v1-abstract-full').style.display = 'inline'; document.getElementById('2406.14326v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.14326v1-abstract-full" style="display: none;"> Electronic Medical Records (EMRs), while integral to modern healthcare, present challenges for clinical reasoning and diagnosis due to their complexity and information redundancy. To address this, we proposed medIKAL (Integrating Knowledge Graphs as Assistants of LLMs), a framework that combines Large Language Models (LLMs) with knowledge graphs (KGs) to enhance diagnostic capabilities. medIKAL assigns weighted importance to entities in medical records based on their type, enabling precise localization of candidate diseases within KGs. It innovatively employs a residual network-like approach, allowing initial diagnosis by the LLM to be merged into KG search results. Through a path-based reranking algorithm and a fill-in-the-blank style prompt template, it further refined the diagnostic process. We validated medIKAL&#39;s effectiveness through extensive experiments on a newly introduced open-sourced Chinese EMR dataset, demonstrating its potential to improve clinical diagnosis in real-world settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.14326v1-abstract-full').style.display = 'none'; document.getElementById('2406.14326v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.12050">arXiv:2406.12050</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.12050">pdf</a>, <a href="https://arxiv.org/format/2406.12050">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Learn Beyond The Answer: Training Language Models with Reflection for Mathematical Reasoning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ge%2C+T">Tao Ge</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+Z">Zhenwen Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+W">Wenhao Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+D">Dian Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+D">Dong Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+M">Meng Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.12050v3-abstract-short" style="display: inline;"> Supervised fine-tuning enhances the problem-solving abilities of language models across various mathematical reasoning tasks. To maximize such benefits, existing research focuses on broadening the training set with various data augmentation techniques, which is effective for standard single-round question-answering settings. Our work introduces a novel technique aimed at cultivating a deeper under&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.12050v3-abstract-full').style.display = 'inline'; document.getElementById('2406.12050v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.12050v3-abstract-full" style="display: none;"> Supervised fine-tuning enhances the problem-solving abilities of language models across various mathematical reasoning tasks. To maximize such benefits, existing research focuses on broadening the training set with various data augmentation techniques, which is effective for standard single-round question-answering settings. Our work introduces a novel technique aimed at cultivating a deeper understanding of the training problems at hand, enhancing performance not only in standard settings but also in more complex scenarios that require reflective thinking. Specifically, we propose reflective augmentation, a method that embeds problem reflection into each training instance. It trains the model to consider alternative perspectives and engage with abstractions and analogies, thereby fostering a thorough comprehension through reflective reasoning. Extensive experiments validate the achievement of our aim, underscoring the unique advantages of our method and its complementary nature relative to existing augmentation techniques. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.12050v3-abstract-full').style.display = 'none'; document.getElementById('2406.12050v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to the main conference of EMNLP 2024; v3 fixes several typos, incorrect section numbers, and missing references to Appendix sections in v2</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.11740">arXiv:2406.11740</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.11740">pdf</a>, <a href="https://arxiv.org/format/2406.11740">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Imagination Policy: Using Generative Point Cloud Models for Learning Manipulation Policies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Haojie Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Schmeckpeper%2C+K">Karl Schmeckpeper</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+D">Dian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Biza%2C+O">Ondrej Biza</a>, <a href="/search/cs?searchtype=author&amp;query=Qian%2C+Y">Yaoyao Qian</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Haotian Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingxi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Platt%2C+R">Robert Platt</a>, <a href="/search/cs?searchtype=author&amp;query=Walters%2C+R">Robin Walters</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.11740v1-abstract-short" style="display: inline;"> Humans can imagine goal states during planning and perform actions to match those goals. In this work, we propose Imagination Policy, a novel multi-task key-frame policy network for solving high-precision pick and place tasks. Instead of learning actions directly, Imagination Policy generates point clouds to imagine desired states which are then translated to actions using rigid action estimation.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.11740v1-abstract-full').style.display = 'inline'; document.getElementById('2406.11740v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.11740v1-abstract-full" style="display: none;"> Humans can imagine goal states during planning and perform actions to match those goals. In this work, we propose Imagination Policy, a novel multi-task key-frame policy network for solving high-precision pick and place tasks. Instead of learning actions directly, Imagination Policy generates point clouds to imagine desired states which are then translated to actions using rigid action estimation. This transforms action inference into a local generative task. We leverage pick and place symmetries underlying the tasks in the generation process and achieve extremely high sample efficiency and generalizability to unseen configurations. Finally, we demonstrate state-of-the-art performance across various tasks on the RLbench benchmark compared with several strong baselines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.11740v1-abstract-full').style.display = 'none'; document.getElementById('2406.11740v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.11213">arXiv:2406.11213</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.11213">pdf</a>, <a href="https://arxiv.org/format/2406.11213">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> A Survey of AIOps for Failure Management in the Era of Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lingzhe Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+T">Tong Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengxi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yifan Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+A">Aiwei Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yong Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zhonghai Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+X">Xuming Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+P+S">Philip S. Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Ying Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.11213v4-abstract-short" style="display: inline;"> As software systems grow increasingly intricate, Artificial Intelligence for IT Operations (AIOps) methods have been widely used in software system failure management to ensure the high availability and reliability of large-scale distributed software systems. However, these methods still face several challenges, such as lack of cross-platform generality and cross-task flexibility. Fortunately, rec&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.11213v4-abstract-full').style.display = 'inline'; document.getElementById('2406.11213v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.11213v4-abstract-full" style="display: none;"> As software systems grow increasingly intricate, Artificial Intelligence for IT Operations (AIOps) methods have been widely used in software system failure management to ensure the high availability and reliability of large-scale distributed software systems. However, these methods still face several challenges, such as lack of cross-platform generality and cross-task flexibility. Fortunately, recent advancements in large language models (LLMs) can significantly address these challenges, and many approaches have already been proposed to explore this field. However, there is currently no comprehensive survey that discusses the differences between LLM-based AIOps and traditional AIOps methods. Therefore, this paper presents a comprehensive survey of AIOps technology for failure management in the LLM era. It includes a detailed definition of AIOps tasks for failure management, the data sources for AIOps, and the LLM-based approaches adopted for AIOps. Additionally, this survey explores the AIOps subtasks, the specific LLM-based approaches suitable for different AIOps subtasks, and the challenges and future directions of the domain, aiming to further its development and application. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.11213v4-abstract-full').style.display = 'none'; document.getElementById('2406.11213v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">35 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.07976">arXiv:2406.07976</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.07976">pdf</a>, <a href="https://arxiv.org/format/2406.07976">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3637528.3671725">10.1145/3637528.3671725 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Multivariate Log-based Anomaly Detection for Distributed Database </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lingzhe Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+T">Tong Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengxi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Ying Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yong Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zhonghai Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.07976v1-abstract-short" style="display: inline;"> Distributed databases are fundamental infrastructures of today&#39;s large-scale software systems such as cloud systems. Detecting anomalies in distributed databases is essential for maintaining software availability. Existing approaches, predominantly developed using Loghub-a comprehensive collection of log datasets from various systems-lack datasets specifically tailored to distributed databases, wh&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.07976v1-abstract-full').style.display = 'inline'; document.getElementById('2406.07976v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.07976v1-abstract-full" style="display: none;"> Distributed databases are fundamental infrastructures of today&#39;s large-scale software systems such as cloud systems. Detecting anomalies in distributed databases is essential for maintaining software availability. Existing approaches, predominantly developed using Loghub-a comprehensive collection of log datasets from various systems-lack datasets specifically tailored to distributed databases, which exhibit unique anomalies. Additionally, there&#39;s a notable absence of datasets encompassing multi-anomaly, multi-node logs. Consequently, models built upon these datasets, primarily designed for standalone systems, are inadequate for distributed databases, and the prevalent method of deeming an entire cluster anomalous based on irregularities in a single node leads to a high false-positive rate. This paper addresses the unique anomalies and multivariate nature of logs in distributed databases. We expose the first open-sourced, comprehensive dataset with multivariate logs from distributed databases. Utilizing this dataset, we conduct an extensive study to identify multiple database anomalies and to assess the effectiveness of state-of-the-art anomaly detection using multivariate log data. Our findings reveal that relying solely on logs from a single node is insufficient for accurate anomaly detection on distributed database. Leveraging these insights, we propose MultiLog, an innovative multivariate log-based anomaly detection approach tailored for distributed databases. Our experiments, based on this novel dataset, demonstrate MultiLog&#39;s superiority, outperforming existing state-of-the-art methods by approximately 12%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.07976v1-abstract-full').style.display = 'none'; document.getElementById('2406.07976v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by KDD&#39;24</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.06852">arXiv:2406.06852</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.06852">pdf</a>, <a href="https://arxiv.org/format/2406.06852">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> A Survey of Backdoor Attacks and Defenses on Large Language Models: Implications for Security Measures </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+S">Shuai Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Meihuizi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+Z">Zhongliang Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Gan%2C+L">Leilei Gan</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+X">Xiaoyu Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+X">Xiaobao Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+J">Jie Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+Y">Yichao Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+F">Fengjun Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Tuan%2C+L+A">Luu Anh Tuan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.06852v4-abstract-short" style="display: inline;"> Large Language Models (LLMs), which bridge the gap between human language understanding and complex problem-solving, achieve state-of-the-art performance on several NLP tasks, particularly in few-shot and zero-shot settings. Despite the demonstrable efficacy of LLMs, due to constraints on computational resources, users have to engage with open-source language models or outsource the entire trainin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.06852v4-abstract-full').style.display = 'inline'; document.getElementById('2406.06852v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.06852v4-abstract-full" style="display: none;"> Large Language Models (LLMs), which bridge the gap between human language understanding and complex problem-solving, achieve state-of-the-art performance on several NLP tasks, particularly in few-shot and zero-shot settings. Despite the demonstrable efficacy of LLMs, due to constraints on computational resources, users have to engage with open-source language models or outsource the entire training process to third-party platforms. However, research has demonstrated that language models are susceptible to potential security vulnerabilities, particularly in backdoor attacks. Backdoor attacks are designed to introduce targeted vulnerabilities into language models by poisoning training samples or model weights, allowing attackers to manipulate model responses through malicious triggers. While existing surveys on backdoor attacks provide a comprehensive overview, they lack an in-depth examination of backdoor attacks specifically targeting LLMs. To bridge this gap and grasp the latest trends in the field, this paper presents a novel perspective on backdoor attacks for LLMs by focusing on fine-tuning methods. Specifically, we systematically classify backdoor attacks into three categories: full-parameter fine-tuning, parameter-efficient fine-tuning, and no fine-tuning Based on insights from a substantial review, we also discuss crucial issues for future research on backdoor attacks, such as further exploring attack algorithms that do not require fine-tuning, or developing more covert attack algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.06852v4-abstract-full').style.display = 'none'; document.getElementById('2406.06852v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.18085">arXiv:2405.18085</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.18085">pdf</a>, <a href="https://arxiv.org/format/2405.18085">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.26599/BDMA.2024.9020010">10.26599/BDMA.2024.9020010 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Network Diffusion -- Framework to Simulate Spreading Processes in Complex Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Czuba%2C+M">Micha艂 Czuba</a>, <a href="/search/cs?searchtype=author&amp;query=Nurek%2C+M">Mateusz Nurek</a>, <a href="/search/cs?searchtype=author&amp;query=Serwata%2C+D">Damian Serwata</a>, <a href="/search/cs?searchtype=author&amp;query=Qiu%2C+Y">Yu-Xuan Qiu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingshan Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Musial%2C+K">Katarzyna Musial</a>, <a href="/search/cs?searchtype=author&amp;query=Michalski%2C+R">Rados艂aw Michalski</a>, <a href="/search/cs?searchtype=author&amp;query=Br%C3%B3dka%2C+P">Piotr Br贸dka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.18085v1-abstract-short" style="display: inline;"> With the advancement of computational network science, its research scope has significantly expanded beyond static graphs to encompass more complex structures. The introduction of streaming, temporal, multilayer, and hypernetwork approaches has brought new possibilities and imposed additional requirements. For instance, by utilising these advancements, one can model structures such as social netwo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.18085v1-abstract-full').style.display = 'inline'; document.getElementById('2405.18085v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.18085v1-abstract-full" style="display: none;"> With the advancement of computational network science, its research scope has significantly expanded beyond static graphs to encompass more complex structures. The introduction of streaming, temporal, multilayer, and hypernetwork approaches has brought new possibilities and imposed additional requirements. For instance, by utilising these advancements, one can model structures such as social networks in a much more refined manner, which is particularly relevant in simulations of the spreading processes. Unfortunately, the pace of advancement is often too rapid for existing computational packages to keep up with the functionality updates. This results in a significant proliferation of tools used by researchers and, consequently, a lack of a universally accepted technological stack that would standardise experimental methods (as seen, e.g. in machine learning). This article addresses that issue by presenting an extended version of the Network Diffusion library. First, a survey of the existing approaches and toolkits for simulating spreading phenomena is shown and then, an overview of the framework functionalities. Finally, we report four case studies conducted with the package to demonstrate its usefulness: the impact of sanitary measures on the spread of COVID-19, the comparison of information diffusion on two temporal network models, and the effectiveness of seed selection methods in the task of influence maximisation in multilayer networks. We conclude the paper with a critical assessment of the library and the outline of still awaiting challenges to standardise research environments in computational network science. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.18085v1-abstract-full').style.display = 'none'; document.getElementById('2405.18085v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To be published in: Big Data Mining and Analytics (https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=8254253)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.07283">arXiv:2405.07283</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.07283">pdf</a>, <a href="https://arxiv.org/format/2405.07283">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> BeautyMap: Binary-Encoded Adaptable Ground Matrix for Dynamic Points Removal in Global Maps </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingkai Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qingwen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+B">Bowen Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+J">Jin Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+M">Ming Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Jensfelt%2C+P">Patric Jensfelt</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.07283v1-abstract-short" style="display: inline;"> Global point clouds that correctly represent the static environment features can facilitate accurate localization and robust path planning. However, dynamic objects introduce undesired ghost tracks that are mixed up with the static environment. Existing dynamic removal methods normally fail to balance the performance in computational efficiency and accuracy. In response, we present BeautyMap to ef&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.07283v1-abstract-full').style.display = 'inline'; document.getElementById('2405.07283v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.07283v1-abstract-full" style="display: none;"> Global point clouds that correctly represent the static environment features can facilitate accurate localization and robust path planning. However, dynamic objects introduce undesired ghost tracks that are mixed up with the static environment. Existing dynamic removal methods normally fail to balance the performance in computational efficiency and accuracy. In response, we present BeautyMap to efficiently remove the dynamic points while retaining static features for high-fidelity global maps. Our approach utilizes a binary-encoded matrix to efficiently extract the environment features. With a bit-wise comparison between matrices of each frame and the corresponding map region, we can extract potential dynamic regions. Then we use coarse to fine hierarchical segmentation of the $z$-axis to handle terrain variations. The final static restoration module accounts for the range-visibility of each single scan and protects static points out of sight. Comparative experiments underscore BeautyMap&#39;s superior performance in both accuracy and efficiency against other dynamic points removal methods. The code is open-sourced at https://github.com/MKJia/BeautyMap. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.07283v1-abstract-full').style.display = 'none'; document.getElementById('2405.07283v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The first two authors are co-first authors. 8 pages, accepted by RA-L</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.14604">arXiv:2404.14604</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.14604">pdf</a>, <a href="https://arxiv.org/format/2404.14604">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Describe-then-Reason: Improving Multimodal Mathematical Reasoning through Visual Comprehension Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+W">Wenhao Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiao%2C+F">Fangkai Jiao</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+M">Meng Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.14604v3-abstract-short" style="display: inline;"> Open-source multimodal large language models (MLLMs) excel in various tasks involving textual and visual inputs but still struggle with complex multimodal mathematical reasoning, lagging behind proprietary models like GPT-4V(ision) and Gemini-Pro. Although fine-tuning with intermediate steps (i.e., rationales) elicits some mathematical reasoning skills, the resulting models still fall short in vis&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.14604v3-abstract-full').style.display = 'inline'; document.getElementById('2404.14604v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.14604v3-abstract-full" style="display: none;"> Open-source multimodal large language models (MLLMs) excel in various tasks involving textual and visual inputs but still struggle with complex multimodal mathematical reasoning, lagging behind proprietary models like GPT-4V(ision) and Gemini-Pro. Although fine-tuning with intermediate steps (i.e., rationales) elicits some mathematical reasoning skills, the resulting models still fall short in visual comprehension due to inadequate visual-centric supervision, which leads to inaccurate interpretation of math figures. To address this issue, we propose a two-step training pipeline VCAR, which emphasizes the Visual Comprehension training in Addition to mathematical Reasoning learning. It first improves the visual comprehension ability of MLLMs through the visual description generation task, followed by another training step on generating rationales with the assistance of descriptions. Experimental results on two popular benchmarks demonstrate that VCAR substantially outperforms baseline methods solely relying on rationale supervision, especially on problems with high visual demands. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.14604v3-abstract-full').style.display = 'none'; document.getElementById('2404.14604v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.11576">arXiv:2404.11576</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.11576">pdf</a>, <a href="https://arxiv.org/format/2404.11576">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> State-space Decomposition Model for Video Prediction Considering Long-term Motion Trend </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cui%2C+F">Fei Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+J">Jiaojiao Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+X">Xiaojiang Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Lai%2C+Z">Zelong Lai</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+M">Mengke Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Menghan Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+G">Guizhong Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.11576v1-abstract-short" style="display: inline;"> Stochastic video prediction enables the consideration of uncertainty in future motion, thereby providing a better reflection of the dynamic nature of the environment. Stochastic video prediction methods based on image auto-regressive recurrent models need to feed their predictions back into the latent space. Conversely, the state-space models, which decouple frame synthesis and temporal prediction&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.11576v1-abstract-full').style.display = 'inline'; document.getElementById('2404.11576v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.11576v1-abstract-full" style="display: none;"> Stochastic video prediction enables the consideration of uncertainty in future motion, thereby providing a better reflection of the dynamic nature of the environment. Stochastic video prediction methods based on image auto-regressive recurrent models need to feed their predictions back into the latent space. Conversely, the state-space models, which decouple frame synthesis and temporal prediction, proves to be more efficient. However, inferring long-term temporal information about motion and generalizing to dynamic scenarios under non-stationary assumptions remains an unresolved challenge. In this paper, we propose a state-space decomposition stochastic video prediction model that decomposes the overall video frame generation into deterministic appearance prediction and stochastic motion prediction. Through adaptive decomposition, the model&#39;s generalization capability to dynamic scenarios is enhanced. In the context of motion prediction, obtaining a prior on the long-term trend of future motion is crucial. Thus, in the stochastic motion prediction branch, we infer the long-term motion trend from conditional frames to guide the generation of future frames that exhibit high consistency with the conditional frames. Experimental results demonstrate that our model outperforms baselines on multiple datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.11576v1-abstract-full').style.display = 'none'; document.getElementById('2404.11576v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.09408">arXiv:2404.09408</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.09408">pdf</a>, <a href="https://arxiv.org/format/2404.09408">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> A Distributed Scalable Cross-chain State Channel Scheme Based on Recursive State Synchronization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liang%2C+X">Xinyu Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+R">Ruiying Du</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jing Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Meng Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+S">Shuangxi Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+Y">Yufeng Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+S">Shixiong Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.09408v1-abstract-short" style="display: inline;"> As cross-chain technology continues to advance, the scale of cross-chain transactions is experiencing significant expansion. To improve scalability, researchers have turned to the study of cross-chain state channels. However, most of the existing schemes rely on trusted parties to support channel operations. To address this issue, we present Interpipe: a distributed cross-chain state channel schem&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.09408v1-abstract-full').style.display = 'inline'; document.getElementById('2404.09408v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.09408v1-abstract-full" style="display: none;"> As cross-chain technology continues to advance, the scale of cross-chain transactions is experiencing significant expansion. To improve scalability, researchers have turned to the study of cross-chain state channels. However, most of the existing schemes rely on trusted parties to support channel operations. To address this issue, we present Interpipe: a distributed cross-chain state channel scheme. Specifically, we propose a real-time cross-chain synchronization scheme to ensure consistent operations between two blockchains to a cross-chain state channel. Moreover, we propose a batch transaction proof scheme based on recursive SNARK to meet the cross-chain verification needs of large-scale users. Based on the above designs, Interpipe offers protocols for opening, updating, closing, and disputing operations to cross-chain state channels. Security analysis shows that Interpipe has consistency and resistance, and experimental results demonstrate that a cross-chain state channel can be nearly as efficient as an existing intra-chain state channel. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.09408v1-abstract-full').style.display = 'none'; document.getElementById('2404.09408v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.05726">arXiv:2404.05726</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.05726">pdf</a>, <a href="https://arxiv.org/format/2404.05726">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MA-LMM: Memory-Augmented Large Multimodal Model for Long-Term Video Understanding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=He%2C+B">Bo He</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H">Hengduo Li</a>, <a href="/search/cs?searchtype=author&amp;query=Jang%2C+Y+K">Young Kyun Jang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Menglin Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+X">Xuefei Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Shah%2C+A">Ashish Shah</a>, <a href="/search/cs?searchtype=author&amp;query=Shrivastava%2C+A">Abhinav Shrivastava</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S">Ser-Nam Lim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.05726v2-abstract-short" style="display: inline;"> With the success of large language models (LLMs), integrating the vision model into LLMs to build vision-language foundation models has gained much more interest recently. However, existing LLM-based large multimodal models (e.g., Video-LLaMA, VideoChat) can only take in a limited number of frames for short video understanding. In this study, we mainly focus on designing an efficient and effective&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.05726v2-abstract-full').style.display = 'inline'; document.getElementById('2404.05726v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.05726v2-abstract-full" style="display: none;"> With the success of large language models (LLMs), integrating the vision model into LLMs to build vision-language foundation models has gained much more interest recently. However, existing LLM-based large multimodal models (e.g., Video-LLaMA, VideoChat) can only take in a limited number of frames for short video understanding. In this study, we mainly focus on designing an efficient and effective model for long-term video understanding. Instead of trying to process more frames simultaneously like most existing work, we propose to process videos in an online manner and store past video information in a memory bank. This allows our model to reference historical video content for long-term analysis without exceeding LLMs&#39; context length constraints or GPU memory limits. Our memory bank can be seamlessly integrated into current multimodal LLMs in an off-the-shelf manner. We conduct extensive experiments on various video understanding tasks, such as long-video understanding, video question answering, and video captioning, and our model can achieve state-of-the-art performances across multiple datasets. Code available at https://boheumd.github.io/MA-LMM/. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.05726v2-abstract-full').style.display = 'none'; document.getElementById('2404.05726v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at CVPR 2024. Project Page https://boheumd.github.io/MA-LMM/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.02439">arXiv:2404.02439</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.02439">pdf</a>, <a href="https://arxiv.org/format/2404.02439">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> A neuroergonomics model to evaluating nuclear power plants operators&#39; performance under heat stress driven by ECG time-frequency spectrums and fNIRS prefrontal cortex network: a CNN-GAT fusion model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Ming Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Meng Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">JianYu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+X">XiangMin Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Z">ZhiHui Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+T">Tao Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.02439v1-abstract-short" style="display: inline;"> Operators experience complicated physiological and psychological states when exposed to extreme heat stress, which can impair cognitive function and decrease performance significantly, ultimately leading to severe secondary disasters. Therefore, there is an urgent need for a feasible technique to identify their abnormal states to enhance the reliability of human-cybernetics systems. With the advan&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.02439v1-abstract-full').style.display = 'inline'; document.getElementById('2404.02439v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.02439v1-abstract-full" style="display: none;"> Operators experience complicated physiological and psychological states when exposed to extreme heat stress, which can impair cognitive function and decrease performance significantly, ultimately leading to severe secondary disasters. Therefore, there is an urgent need for a feasible technique to identify their abnormal states to enhance the reliability of human-cybernetics systems. With the advancement of deep learning in physiological modeling, a model for evaluating operators&#39; performance driven by electrocardiogram (ECG) and functional near-infrared spectroscopy (fNIRS) was proposed, demonstrating high ecological validity. The model fused a convolutional neural network (CNN) backbone and a graph attention network (GAT) backbone to extract discriminative features from ECG time-frequency spectrums and fNIRS prefrontal cortex (PFC) network respectively with deeper neuroscience domain knowledge, and eventually achieved 0.90 AUC. Results supported that handcrafted features extracted by specialized neuroscience methods can alleviate overfitting. Inspired by the small-world nature of the brain network, the fNIRS PFC network was organized as an undirected graph and embedded by GAT. It is proven to perform better in information aggregation and delivery compared to a simple non-linear transformation. The model provides a potential neuroergonomics application for evaluating the human state in vital human-cybernetics systems under industry 5.0 scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.02439v1-abstract-full').style.display = 'none'; document.getElementById('2404.02439v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.01449">arXiv:2403.01449</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.01449">pdf</a>, <a href="https://arxiv.org/format/2403.01449">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DUFOMap: Efficient Dynamic Awareness Mapping </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Duberg%2C+D">Daniel Duberg</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qingwen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">MingKai Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Jensfelt%2C+P">Patric Jensfelt</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.01449v2-abstract-short" style="display: inline;"> The dynamic nature of the real world is one of the main challenges in robotics. The first step in dealing with it is to detect which parts of the world are dynamic. A typical benchmark task is to create a map that contains only the static part of the world to support, for example, localization and planning. Current solutions are often applied in post-processing, where parameter tuning allows the u&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.01449v2-abstract-full').style.display = 'inline'; document.getElementById('2403.01449v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.01449v2-abstract-full" style="display: none;"> The dynamic nature of the real world is one of the main challenges in robotics. The first step in dealing with it is to detect which parts of the world are dynamic. A typical benchmark task is to create a map that contains only the static part of the world to support, for example, localization and planning. Current solutions are often applied in post-processing, where parameter tuning allows the user to adjust the setting for a specific dataset. In this paper, we propose DUFOMap, a novel dynamic awareness mapping framework designed for efficient online processing. Despite having the same parameter settings for all scenarios, it performs better or is on par with state-of-the-art methods. Ray casting is utilized to identify and classify fully observed empty regions. Since these regions have been observed empty, it follows that anything inside them at another time must be dynamic. Evaluation is carried out in various scenarios, including outdoor environments in KITTI and Argoverse 2, open areas on the KTH campus, and with different sensor types. DUFOMap outperforms the state of the art in terms of accuracy and computational efficiency. The source code, benchmarks, and links to the datasets utilized are provided. See https://kth-rpl.github.io/dufomap for more details. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.01449v2-abstract-full').style.display = 'none'; document.getElementById('2403.01449v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The first two authors hold equal contribution. 8 pages, 7 figures, project page https://kth-rpl.github.io/dufomap</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.18107">arXiv:2402.18107</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.18107">pdf</a>, <a href="https://arxiv.org/format/2402.18107">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> Multimodal Interaction Modeling via Self-Supervised Multi-Task Learning for Review Helpfulness Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gong%2C+H">HongLin Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Jing%2C+L">Liqiang Jing</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.18107v2-abstract-short" style="display: inline;"> In line with the latest research, the task of identifying helpful reviews from a vast pool of user-generated textual and visual data has become a prominent area of study. Effective modal representations are expected to possess two key attributes: consistency and differentiation. Current methods designed for Multimodal Review Helpfulness Prediction (MRHP) face limitations in capturing distinctive i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.18107v2-abstract-full').style.display = 'inline'; document.getElementById('2402.18107v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.18107v2-abstract-full" style="display: none;"> In line with the latest research, the task of identifying helpful reviews from a vast pool of user-generated textual and visual data has become a prominent area of study. Effective modal representations are expected to possess two key attributes: consistency and differentiation. Current methods designed for Multimodal Review Helpfulness Prediction (MRHP) face limitations in capturing distinctive information due to their reliance on uniform multimodal annotation. The process of adding varied multimodal annotations is not only time-consuming but also labor-intensive. To tackle these challenges, we propose an auto-generated scheme based on multi-task learning to generate pseudo labels. This approach allows us to simultaneously train for the global multimodal interaction task and the separate cross-modal interaction subtasks, enabling us to learn and leverage both consistency and differentiation effectively. Subsequently, experimental results validate the effectiveness of pseudo labels, and our approach surpasses previous textual and multimodal baseline models on two widely accessible benchmark datasets, providing a solution to the MRHP problem. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.18107v2-abstract-full').style.display = 'none'; document.getElementById('2402.18107v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages,4 figures, 4 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.12168">arXiv:2402.12168</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.12168">pdf</a>, <a href="https://arxiv.org/format/2402.12168">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Defending Against Weight-Poisoning Backdoor Attacks for Parameter-Efficient Fine-Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+S">Shuai Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Gan%2C+L">Leilei Gan</a>, <a href="/search/cs?searchtype=author&amp;query=Tuan%2C+L+A">Luu Anh Tuan</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+J">Jie Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+L">Lingjuan Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Meihuizi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Wen%2C+J">Jinming Wen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.12168v3-abstract-short" style="display: inline;"> Recently, various parameter-efficient fine-tuning (PEFT) strategies for application to language models have been proposed and successfully implemented. However, this raises the question of whether PEFT, which only updates a limited set of model parameters, constitutes security vulnerabilities when confronted with weight-poisoning backdoor attacks. In this study, we show that PEFT is more susceptib&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.12168v3-abstract-full').style.display = 'inline'; document.getElementById('2402.12168v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.12168v3-abstract-full" style="display: none;"> Recently, various parameter-efficient fine-tuning (PEFT) strategies for application to language models have been proposed and successfully implemented. However, this raises the question of whether PEFT, which only updates a limited set of model parameters, constitutes security vulnerabilities when confronted with weight-poisoning backdoor attacks. In this study, we show that PEFT is more susceptible to weight-poisoning backdoor attacks compared to the full-parameter fine-tuning method, with pre-defined triggers remaining exploitable and pre-defined targets maintaining high confidence, even after fine-tuning. Motivated by this insight, we developed a Poisoned Sample Identification Module (PSIM) leveraging PEFT, which identifies poisoned samples through confidence, providing robust defense against weight-poisoning backdoor attacks. Specifically, we leverage PEFT to train the PSIM with randomly reset sample labels. During the inference process, extreme confidence serves as an indicator for poisoned samples, while others are clean. We conduct experiments on text classification tasks, five fine-tuning strategies, and three weight-poisoning backdoor attack methods. Experiments show near 100% success rates for weight-poisoning backdoor attacks when utilizing PEFT. Furthermore, our defensive approach exhibits overall competitive performance in mitigating weight-poisoning backdoor attacks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.12168v3-abstract-full').style.display = 'none'; document.getElementById('2402.12168v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NAACL Findings 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.06789">arXiv:2401.06789</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.06789">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Information Retrieval and Classification of Real-Time Multi-Source Hurricane Evacuation Notices </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+T">Tingting Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+S">Shubo Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Daly%2C+J">Jordan Daly</a>, <a href="/search/cs?searchtype=author&amp;query=Geiger%2C+M">Melissa Geiger</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Minna Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jinfeng Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.06789v1-abstract-short" style="display: inline;"> For an approaching disaster, the tracking of time-sensitive critical information such as hurricane evacuation notices is challenging in the United States. These notices are issued and distributed rapidly by numerous local authorities that may spread across multiple states. They often undergo frequent updates and are distributed through diverse online portals lacking standard formats. In this study&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.06789v1-abstract-full').style.display = 'inline'; document.getElementById('2401.06789v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.06789v1-abstract-full" style="display: none;"> For an approaching disaster, the tracking of time-sensitive critical information such as hurricane evacuation notices is challenging in the United States. These notices are issued and distributed rapidly by numerous local authorities that may spread across multiple states. They often undergo frequent updates and are distributed through diverse online portals lacking standard formats. In this study, we developed an approach to timely detect and track the locally issued hurricane evacuation notices. The text data were collected mainly with a spatially targeted web scraping method. They were manually labeled and then classified using natural language processing techniques with deep learning models. The classification of mandatory evacuation notices achieved a high accuracy (recall = 96%). We used Hurricane Ian (2022) to illustrate how real-time evacuation notices extracted from local government sources could be redistributed with a Web GIS system. Our method applied to future hurricanes provides live data for situation awareness to higher-level government agencies and news media. The archived data helps scholars to study government responses toward weather warnings and individual behaviors influenced by evacuation history. The framework may be applied to other types of disasters for rapid and targeted retrieval, classification, redistribution, and archiving of real-time government orders and notifications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.06789v1-abstract-full').style.display = 'none'; document.getElementById('2401.06789v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.05949">arXiv:2401.05949</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.05949">pdf</a>, <a href="https://arxiv.org/format/2401.05949">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Universal Vulnerabilities in Large Language Models: Backdoor Attacks for In-context Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+S">Shuai Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Meihuizi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Tuan%2C+L+A">Luu Anh Tuan</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+F">Fengjun Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Wen%2C+J">Jinming Wen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.05949v6-abstract-short" style="display: inline;"> In-context learning, a paradigm bridging the gap between pre-training and fine-tuning, has demonstrated high efficacy in several NLP tasks, especially in few-shot settings. Despite being widely applied, in-context learning is vulnerable to malicious attacks. In this work, we raise security concerns regarding this paradigm. Our studies demonstrate that an attacker can manipulate the behavior of lar&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.05949v6-abstract-full').style.display = 'inline'; document.getElementById('2401.05949v6-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.05949v6-abstract-full" style="display: none;"> In-context learning, a paradigm bridging the gap between pre-training and fine-tuning, has demonstrated high efficacy in several NLP tasks, especially in few-shot settings. Despite being widely applied, in-context learning is vulnerable to malicious attacks. In this work, we raise security concerns regarding this paradigm. Our studies demonstrate that an attacker can manipulate the behavior of large language models by poisoning the demonstration context, without the need for fine-tuning the model. Specifically, we design a new backdoor attack method, named ICLAttack, to target large language models based on in-context learning. Our method encompasses two types of attacks: poisoning demonstration examples and poisoning demonstration prompts, which can make models behave in alignment with predefined intentions. ICLAttack does not require additional fine-tuning to implant a backdoor, thus preserving the model&#39;s generality. Furthermore, the poisoned examples are correctly labeled, enhancing the natural stealth of our attack method. Extensive experimental results across several language models, ranging in size from 1.3B to 180B parameters, demonstrate the effectiveness of our attack method, exemplified by a high average attack success rate of 95.0% across the three datasets on OPT models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.05949v6-abstract-full').style.display = 'none'; document.getElementById('2401.05949v6-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.15162">arXiv:2312.15162</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.15162">pdf</a>, <a href="https://arxiv.org/format/2312.15162">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Cycle-Consistency Learning for Captioning and Grounding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+N">Ning Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+J">Jiajun Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingbo Jia</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.15162v1-abstract-short" style="display: inline;"> We present that visual grounding and image captioning, which perform as two mutually inverse processes, can be bridged together for collaborative training by careful designs. By consolidating this idea, we introduce CyCo, a cyclic-consistent learning framework to ameliorate the independent training pipelines of visual grounding and image captioning. The proposed framework (1) allows the semi-weakl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.15162v1-abstract-full').style.display = 'inline'; document.getElementById('2312.15162v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.15162v1-abstract-full" style="display: none;"> We present that visual grounding and image captioning, which perform as two mutually inverse processes, can be bridged together for collaborative training by careful designs. By consolidating this idea, we introduce CyCo, a cyclic-consistent learning framework to ameliorate the independent training pipelines of visual grounding and image captioning. The proposed framework (1) allows the semi-weakly supervised training of visual grounding; (2) improves the performance of fully supervised visual grounding; (3) yields a general captioning model that can describe arbitrary image regions. Extensive experiments show that our fully supervised grounding model achieves state-of-the-art performance, and the semi-weakly supervised one also exhibits competitive performance compared to the fully supervised counterparts. Our image captioning model has the capability to freely describe image regions and meanwhile shows impressive performance on prevalent captioning benchmarks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.15162v1-abstract-full').style.display = 'none'; document.getElementById('2312.15162v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in AAAI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.10493">arXiv:2312.10493</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.10493">pdf</a>, <a href="https://arxiv.org/format/2312.10493">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> Debiasing Multimodal Sarcasm Detection with Contrastive Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+C">Can Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Jing%2C+L">Liqiang Jing</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.10493v2-abstract-short" style="display: inline;"> Despite commendable achievements made by existing work, prevailing multimodal sarcasm detection studies rely more on textual content over visual information. It unavoidably induces spurious correlations between textual words and labels, thereby significantly hindering the models&#39; generalization capability. To address this problem, we define the task of out-of-distribution (OOD) multimodal sarcasm&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.10493v2-abstract-full').style.display = 'inline'; document.getElementById('2312.10493v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.10493v2-abstract-full" style="display: none;"> Despite commendable achievements made by existing work, prevailing multimodal sarcasm detection studies rely more on textual content over visual information. It unavoidably induces spurious correlations between textual words and labels, thereby significantly hindering the models&#39; generalization capability. To address this problem, we define the task of out-of-distribution (OOD) multimodal sarcasm detection, which aims to evaluate models&#39; generalizability when the word distribution is different in training and testing settings. Moreover, we propose a novel debiasing multimodal sarcasm detection framework with contrastive learning, which aims to mitigate the harmful effect of biased textual factors for robust OOD generalization. In particular, we first design counterfactual data augmentation to construct the positive samples with dissimilar word biases and negative samples with similar word biases. Subsequently, we devise an adapted debiasing contrastive learning mechanism to empower the model to learn robust task-relevant features and alleviate the adverse effect of biased words. Extensive experiments show the superiority of the proposed framework. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.10493v2-abstract-full').style.display = 'none'; document.getElementById('2312.10493v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.08711">arXiv:2311.08711</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.08711">pdf</a>, <a href="https://arxiv.org/format/2311.08711">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> PLUG: Leveraging Pivot Language in Cross-Lingual Instruction Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+D">Dong-Ho Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+Y">Yuwei Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+W">Wenhao Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+M">Meng Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Barbieri%2C+F">Francesco Barbieri</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.08711v2-abstract-short" style="display: inline;"> Instruction tuning has remarkably advanced large language models (LLMs) in understanding and responding to diverse human instructions. Despite the success in high-resource languages, its application in lower-resource ones faces challenges due to the imbalanced foundational abilities of LLMs across different languages, stemming from the uneven language distribution in their pre-training data. To ta&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.08711v2-abstract-full').style.display = 'inline'; document.getElementById('2311.08711v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.08711v2-abstract-full" style="display: none;"> Instruction tuning has remarkably advanced large language models (LLMs) in understanding and responding to diverse human instructions. Despite the success in high-resource languages, its application in lower-resource ones faces challenges due to the imbalanced foundational abilities of LLMs across different languages, stemming from the uneven language distribution in their pre-training data. To tackle this issue, we propose pivot language guided generation (PLUG), an approach that utilizes a high-resource language, primarily English, as the pivot to enhance instruction tuning in lower-resource languages. It trains the model to first process instructions in the pivot language, and then produce responses in the target language. To evaluate our approach, we introduce a benchmark, X-AlpacaEval, of instructions in 4 languages (Chinese, Korean, Italian, and Spanish), each annotated by professional translators. Our approach demonstrates a significant improvement in the instruction-following abilities of LLMs by 29% on average, compared to directly responding in the target language alone. Further experiments validate the versatility of our approach by employing alternative pivot languages beyond English to assist languages where LLMs exhibit lower proficiency. Our code and data are available at https://github.com/ytyz1307zzh/PLUG. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.08711v2-abstract-full').style.display = 'none'; document.getElementById('2311.08711v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.09987">arXiv:2310.09987</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.09987">pdf</a>, <a href="https://arxiv.org/format/2310.09987">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Network Disruption via Continuous Batch Removal: The Case of Sicilian Mafia </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingshan Jia</a>, <a href="/search/cs?searchtype=author&amp;query=De+Meo%2C+P">Pasquale De Meo</a>, <a href="/search/cs?searchtype=author&amp;query=Gabrys%2C+B">Bogdan Gabrys</a>, <a href="/search/cs?searchtype=author&amp;query=Musial%2C+K">Katarzyna Musial</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.09987v1-abstract-short" style="display: inline;"> Network disruption is pivotal in understanding the robustness and vulnerability of complex networks, which is instrumental in devising strategies for infrastructure protection, epidemic control, cybersecurity, and combating crime. In this paper, with a particular focus on disrupting criminal networks, we proposed to impose a within-the-largest-connected-component constraint in a continuous batch r&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.09987v1-abstract-full').style.display = 'inline'; document.getElementById('2310.09987v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.09987v1-abstract-full" style="display: none;"> Network disruption is pivotal in understanding the robustness and vulnerability of complex networks, which is instrumental in devising strategies for infrastructure protection, epidemic control, cybersecurity, and combating crime. In this paper, with a particular focus on disrupting criminal networks, we proposed to impose a within-the-largest-connected-component constraint in a continuous batch removal disruption process. Through a series of experiments on a recently released Sicilian Mafia network, we revealed that the constraint would enhance degree-based methods while weakening betweenness-based approaches. Moreover, based on the findings from the experiments using various disruption strategies, we propose a structurally-filtered greedy disruption strategy that integrates the effectiveness of greedy-like methods with the efficiency of structural-metric-based approaches. The proposed strategy significantly outperforms the longstanding state-of-the-art method of betweenness centrality while maintaining the same time complexity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.09987v1-abstract-full').style.display = 'none'; document.getElementById('2310.09987v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.07700">arXiv:2310.07700</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.07700">pdf</a>, <a href="https://arxiv.org/format/2310.07700">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Knowledge-enhanced Memory Model for Emotional Support Conversation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Q">Qianglong Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Jing%2C+L">Liqiang Jing</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+D">Dawei Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+R">Renyu Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.07700v1-abstract-short" style="display: inline;"> The prevalence of mental disorders has become a significant issue, leading to the increased focus on Emotional Support Conversation as an effective supplement for mental health support. Existing methods have achieved compelling results, however, they still face three challenges: 1) variability of emotions, 2) practicality of the response, and 3) intricate strategy modeling. To address these challe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.07700v1-abstract-full').style.display = 'inline'; document.getElementById('2310.07700v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.07700v1-abstract-full" style="display: none;"> The prevalence of mental disorders has become a significant issue, leading to the increased focus on Emotional Support Conversation as an effective supplement for mental health support. Existing methods have achieved compelling results, however, they still face three challenges: 1) variability of emotions, 2) practicality of the response, and 3) intricate strategy modeling. To address these challenges, we propose a novel knowledge-enhanced Memory mODEl for emotional suppoRt coNversation (MODERN). Specifically, we first devise a knowledge-enriched dialogue context encoding to perceive the dynamic emotion change of different periods of the conversation for coherent user state modeling and select context-related concepts from ConceptNet for practical response generation. Thereafter, we implement a novel memory-enhanced strategy modeling module to model the semantic patterns behind the strategy categories. Extensive experiments on a widely used large-scale dataset verify the superiority of our model over cutting-edge baselines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.07700v1-abstract-full').style.display = 'none'; document.getElementById('2310.07700v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.13504">arXiv:2309.13504</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.13504">pdf</a>, <a href="https://arxiv.org/format/2309.13504">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> Attention Is All You Need For Blind Room Volume Estimation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chunxi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Maoshen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Meiran Li</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+C">Changchun Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+W">Wenyu Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.13504v3-abstract-short" style="display: inline;"> In recent years, dynamic parameterization of acoustic environments has raised increasing attention in the field of audio processing. One of the key parameters that characterize the local room acoustics in isolation from orientation and directivity of sources and receivers is the geometric room volume. Convolutional neural networks (CNNs) have been widely selected as the main models for conducting&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.13504v3-abstract-full').style.display = 'inline'; document.getElementById('2309.13504v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.13504v3-abstract-full" style="display: none;"> In recent years, dynamic parameterization of acoustic environments has raised increasing attention in the field of audio processing. One of the key parameters that characterize the local room acoustics in isolation from orientation and directivity of sources and receivers is the geometric room volume. Convolutional neural networks (CNNs) have been widely selected as the main models for conducting blind room acoustic parameter estimation, which aims to learn a direct mapping from audio spectrograms to corresponding labels. With the recent trend of self-attention mechanisms, this paper introduces a purely attention-based model to blindly estimate room volumes based on single-channel noisy speech signals. We demonstrate the feasibility of eliminating the reliance on CNN for this task and the proposed Transformer architecture takes Gammatone magnitude spectral coefficients and phase spectrograms as inputs. To enhance the model performance given the task-specific dataset, cross-modality transfer learning is also applied. Experimental results demonstrate that the proposed model outperforms traditional CNN models across a wide range of real-world acoustics spaces, especially with the help of the dedicated pretraining and data augmentation schemes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.13504v3-abstract-full').style.display = 'none'; document.getElementById('2309.13504v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 4 figures, to be published in proceedings of ICASSP 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.11653">arXiv:2309.11653</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.11653">pdf</a>, <a href="https://arxiv.org/format/2309.11653">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3613904.3642385">10.1145/3613904.3642385 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> &#34;It&#39;s a Fair Game&#34;, or Is It? Examining How Users Navigate Disclosure Risks and Benefits When Using LLM-Based Conversational Agents </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhiping Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Michelle Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+H">Hao-Ping Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+B">Bingsheng Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Das%2C+S">Sauvik Das</a>, <a href="/search/cs?searchtype=author&amp;query=Lerner%2C+A">Ada Lerner</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+D">Dakuo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+T">Tianshi Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.11653v2-abstract-short" style="display: inline;"> The widespread use of Large Language Model (LLM)-based conversational agents (CAs), especially in high-stakes domains, raises many privacy concerns. Building ethical LLM-based CAs that respect user privacy requires an in-depth understanding of the privacy risks that concern users the most. However, existing research, primarily model-centered, does not provide insight into users&#39; perspectives. To b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.11653v2-abstract-full').style.display = 'inline'; document.getElementById('2309.11653v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.11653v2-abstract-full" style="display: none;"> The widespread use of Large Language Model (LLM)-based conversational agents (CAs), especially in high-stakes domains, raises many privacy concerns. Building ethical LLM-based CAs that respect user privacy requires an in-depth understanding of the privacy risks that concern users the most. However, existing research, primarily model-centered, does not provide insight into users&#39; perspectives. To bridge this gap, we analyzed sensitive disclosures in real-world ChatGPT conversations and conducted semi-structured interviews with 19 LLM-based CA users. We found that users are constantly faced with trade-offs between privacy, utility, and convenience when using LLM-based CAs. However, users&#39; erroneous mental models and the dark patterns in system design limited their awareness and comprehension of the privacy risks. Additionally, the human-like interactions encouraged more sensitive disclosures, which complicated users&#39; ability to navigate the trade-offs. We discuss practical design guidelines and the needs for paradigm shifts to protect the privacy of LLM-based CA users. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.11653v2-abstract-full').style.display = 'none'; document.getElementById('2309.11653v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">26 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.08396">arXiv:2309.08396</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.08396">pdf</a>, <a href="https://arxiv.org/format/2309.08396">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Resource Optimization Using A Step-by-step Scheme in Wireless Sensing and Localization Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+R">Ruihang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Jiayan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mu Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+T">Tingting Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.08396v1-abstract-short" style="display: inline;"> Due to the lack of wireless spectrum resources, people are focusing on the versatile wireless networks. Wireless localization and target sensing both rely on precise extraction of parameters such as signal amplitude, propagation delay and Doppler shift from the received signals. Due to the high multi-path resolution and strong penetration of UWB signals, both localization and sensing can be achiev&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.08396v1-abstract-full').style.display = 'inline'; document.getElementById('2309.08396v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.08396v1-abstract-full" style="display: none;"> Due to the lack of wireless spectrum resources, people are focusing on the versatile wireless networks. Wireless localization and target sensing both rely on precise extraction of parameters such as signal amplitude, propagation delay and Doppler shift from the received signals. Due to the high multi-path resolution and strong penetration of UWB signals, both localization and sensing can be achieved through the same UWB waveform. Practical networks are often resource-constrained, in order to improve the accuracy of integrated networks, we need to optimize the allocation of resources in the networks. Considering the complexity of the multi-slot networks, this paper derives the Fisher Information Matrix (FIM) expressions for single-slot and dual-slot integrated sensing and localization (ISAL) networks respectively, and proposes two resource optimization schemes, namely step-by-step scheme and integrated scheme. The numerical results show that: (i) for the sensing-resource-deficient networks with relatively uniform node distribution, the energy allocated to each step in the step-by-step scheme satisfies the relationship: energy for clock offset &lt; energy for radar localization &lt; energy for target sensing. (ii) In the multi-slot ISAL networks, the system will allocate more energy to the time slots where the networks are relatively sensing-resource-deficient. (iii) The step-by-step scheme is more suitable for the sensing-resource-abundant networks, while the integrated scheme is more suitable for the sensing-resource-deficient networks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.08396v1-abstract-full').style.display = 'none'; document.getElementById('2309.08396v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">28 pages, 12 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.15131">arXiv:2308.15131</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.15131">pdf</a>, <a href="https://arxiv.org/format/2308.15131">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TCOMM.2024.3387869">10.1109/TCOMM.2024.3387869 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Robust Transceiver Design for Covert Integrated Sensing and Communications With Imperfect CSI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuchen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ni%2C+W">Wanli Ni</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jianquan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+W">Wanbin Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Min Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Eldar%2C+Y+C">Yonina C. Eldar</a>, <a href="/search/cs?searchtype=author&amp;query=Niyato%2C+D">Dusit Niyato</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.15131v2-abstract-short" style="display: inline;"> We propose a robust transceiver design for a covert integrated sensing and communications (ISAC) system with imperfect channel state information (CSI). Considering both bounded and probabilistic CSI error models, we formulate worst-case and outage-constrained robust optimization problems of joint trasceiver beamforming and radar waveform design to balance the radar performance of multiple targets&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.15131v2-abstract-full').style.display = 'inline'; document.getElementById('2308.15131v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.15131v2-abstract-full" style="display: none;"> We propose a robust transceiver design for a covert integrated sensing and communications (ISAC) system with imperfect channel state information (CSI). Considering both bounded and probabilistic CSI error models, we formulate worst-case and outage-constrained robust optimization problems of joint trasceiver beamforming and radar waveform design to balance the radar performance of multiple targets while ensuring communications performance and covertness of the system. The optimization problems are challenging due to the non-convexity arising from the semi-infinite constraints (SICs) and the coupled transceiver variables. In an effort to tackle the former difficulty, S-procedure and Bernstein-type inequality are introduced for converting the SICs into finite convex linear matrix inequalities (LMIs) and second-order cone constraints. A robust alternating optimization framework referred to alternating double-checking is developed for decoupling the transceiver design problem into feasibility-checking transmitter- and receiver-side subproblems, transforming the rank-one constraints into a set of LMIs, and verifying the feasibility of beamforming by invoking the matrix-lifting scheme. Numerical results are provided to demonstrate the effectiveness and robustness of the proposed algorithm in improving the performance of covert ISAC systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.15131v2-abstract-full').style.display = 'none'; document.getElementById('2308.15131v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work has been submitted to IEEE journal for publication</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Transactions on Communications, 2024 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.08961">arXiv:2308.08961</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.08961">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> On the Evaluation of Neural Code Translation: Taxonomy and Benchmark </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jiao%2C+M">Mingsheng Jiao</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+T">Tingrui Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xuan Li</a>, <a href="/search/cs?searchtype=author&amp;query=Qiu%2C+G">Guanjie Qiu</a>, <a href="/search/cs?searchtype=author&amp;query=Gu%2C+X">Xiaodong Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+B">Beijun Shen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.08961v1-abstract-short" style="display: inline;"> In recent years, neural code translation has gained increasing attention. While most of the research focuses on improving model architectures and training processes, we notice that the evaluation process and benchmark for code translation models are severely limited: they primarily treat source code as natural languages and provide a holistic accuracy score while disregarding the full spectrum of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08961v1-abstract-full').style.display = 'inline'; document.getElementById('2308.08961v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.08961v1-abstract-full" style="display: none;"> In recent years, neural code translation has gained increasing attention. While most of the research focuses on improving model architectures and training processes, we notice that the evaluation process and benchmark for code translation models are severely limited: they primarily treat source code as natural languages and provide a holistic accuracy score while disregarding the full spectrum of model capabilities across different translation types and complexity. In this paper, we present a comprehensive investigation of four state-of-the-art models and analyze in-depth the advantages and limitations of three existing benchmarks. Based on the empirical results, we develop a taxonomy that categorizes code translation tasks into four primary types according to their complexity and knowledge dependence: token level (type 1), syntactic level (type 2), library level (type 3), and algorithm level (type 4). We then conduct a thorough analysis of how existing approaches perform across these four categories. Our findings indicate that while state-of-the-art code translation models excel in type-1 and type-2 translations, they struggle with knowledge-dependent ones such as type-3 and type-4. Existing benchmarks are biased towards trivial translations, such as keyword mapping. To overcome these limitations, we construct G-TransEval, a new benchmark by manually curating type-3 and type-4 translation pairs and unit test cases. Results on our new benchmark suggest that G-TransEval can exhibit more comprehensive and finer-grained capability of code translation models and thus provide a more rigorous evaluation. Our studies also provide more insightful findings and suggestions for future research, such as building type-3 and type-4 training data and ensembling multiple pretraining approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08961v1-abstract-full').style.display = 'none'; document.getElementById('2308.08961v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted by ASE2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.07260">arXiv:2307.07260</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.07260">pdf</a>, <a href="https://arxiv.org/format/2307.07260">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> A Dynamic Points Removal Benchmark in Point Cloud Maps </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qingwen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Duberg%2C+D">Daniel Duberg</a>, <a href="/search/cs?searchtype=author&amp;query=Geng%2C+R">Ruoyu Geng</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mingkai Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Lujia Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Jensfelt%2C+P">Patric Jensfelt</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.07260v1-abstract-short" style="display: inline;"> In the field of robotics, the point cloud has become an essential map representation. From the perspective of downstream tasks like localization and global path planning, points corresponding to dynamic objects will adversely affect their performance. Existing methods for removing dynamic points in point clouds often lack clarity in comparative evaluations and comprehensive analysis. Therefore, we&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.07260v1-abstract-full').style.display = 'inline'; document.getElementById('2307.07260v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.07260v1-abstract-full" style="display: none;"> In the field of robotics, the point cloud has become an essential map representation. From the perspective of downstream tasks like localization and global path planning, points corresponding to dynamic objects will adversely affect their performance. Existing methods for removing dynamic points in point clouds often lack clarity in comparative evaluations and comprehensive analysis. Therefore, we propose an easy-to-extend unified benchmarking framework for evaluating techniques for removing dynamic points in maps. It includes refactored state-of-art methods and novel metrics to analyze the limitations of these approaches. This enables researchers to dive deep into the underlying reasons behind these limitations. The benchmark makes use of several datasets with different sensor types. All the code and datasets related to our study are publicly available for further development and utilization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.07260v1-abstract-full').style.display = 'none'; document.getElementById('2307.07260v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Code check https://github.com/KTH-RPL/DynamicMap_Benchmark.git , 7 pages, accepted by ITSC 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.03166">arXiv:2307.03166</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.03166">pdf</a>, <a href="https://arxiv.org/format/2307.03166">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> VideoGLUE: Video General Understanding Evaluation of Foundation Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+L">Liangzhe Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Gundavarapu%2C+N+B">Nitesh Bharadwaj Gundavarapu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+L">Long Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+H">Hao Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+Y">Yin Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+L">Lu Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+X">Xuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Menglin Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Weyand%2C+T">Tobias Weyand</a>, <a href="/search/cs?searchtype=author&amp;query=Friedman%2C+L">Luke Friedman</a>, <a href="/search/cs?searchtype=author&amp;query=Sirotenko%2C+M">Mikhail Sirotenko</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Huisheng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Schroff%2C+F">Florian Schroff</a>, <a href="/search/cs?searchtype=author&amp;query=Adam%2C+H">Hartwig Adam</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+M">Ming-Hsuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+T">Ting Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+B">Boqing Gong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.03166v3-abstract-short" style="display: inline;"> We evaluate the video understanding capabilities of existing foundation models (FMs) using a carefully designed experiment protocol consisting of three hallmark tasks (action recognition,temporal localization, and spatiotemporal localization), eight datasets well received by the community, and four adaptation methods tailoring an FM for downstream tasks. Furthermore, we jointly profile FMs&#39; effica&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.03166v3-abstract-full').style.display = 'inline'; document.getElementById('2307.03166v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.03166v3-abstract-full" style="display: none;"> We evaluate the video understanding capabilities of existing foundation models (FMs) using a carefully designed experiment protocol consisting of three hallmark tasks (action recognition,temporal localization, and spatiotemporal localization), eight datasets well received by the community, and four adaptation methods tailoring an FM for downstream tasks. Furthermore, we jointly profile FMs&#39; efficacy and efficiency when adapting to general video understanding tasks using cost measurements during both training and inference. Our main findings areas follows. First, task-specialized models significantly outperform the seven FMs studied in this work, in sharp contrast to what FMs have achieved in natural language and image understanding. Second, video-native FMs, whose pretraining data mainly contains the video modality, are generally better than image-native FMs in classifying motion-rich videos, localizing actions in time, and understanding a video of more than one action. Third, the video-native FMs can perform well on video tasks under light adaptations to downstream tasks (e.g., freezing the FM backbones), while image-native FMs win in full end-to-end finetuning. The first two observations reveal the need and tremendous opportunities to conduct research on video-focused FMs, and the last confirms that both tasks and adaptation methods matter when it comes to the evaluation of FMs. Our code is released under: https://github.com/tensorflow/models/tree/master/official/projects/videoglue. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.03166v3-abstract-full').style.display = 'none'; document.getElementById('2307.03166v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to TMLR</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.16650">arXiv:2306.16650</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.16650">pdf</a>, <a href="https://arxiv.org/format/2306.16650">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Multi-source Semantic Graph-based Multimodal Sarcasm Explanation Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jing%2C+L">Liqiang Jing</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+X">Xuemeng Song</a>, <a href="/search/cs?searchtype=author&amp;query=Ouyang%2C+K">Kun Ouyang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengzhao Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Nie%2C+L">Liqiang Nie</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.16650v1-abstract-short" style="display: inline;"> Multimodal Sarcasm Explanation (MuSE) is a new yet challenging task, which aims to generate a natural language sentence for a multimodal social post (an image as well as its caption) to explain why it contains sarcasm. Although the existing pioneer study has achieved great success with the BART backbone, it overlooks the gap between the visual feature space and the decoder semantic space, the obje&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.16650v1-abstract-full').style.display = 'inline'; document.getElementById('2306.16650v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.16650v1-abstract-full" style="display: none;"> Multimodal Sarcasm Explanation (MuSE) is a new yet challenging task, which aims to generate a natural language sentence for a multimodal social post (an image as well as its caption) to explain why it contains sarcasm. Although the existing pioneer study has achieved great success with the BART backbone, it overlooks the gap between the visual feature space and the decoder semantic space, the object-level metadata of the image, as well as the potential external knowledge. To solve these limitations, in this work, we propose a novel mulTi-source sEmantic grAph-based Multimodal sarcasm explanation scheme, named TEAM. In particular, TEAM extracts the object-level semantic meta-data instead of the traditional global visual features from the input image. Meanwhile, TEAM resorts to ConceptNet to obtain the external related knowledge concepts for the input text and the extracted object meta-data. Thereafter, TEAM introduces a multi-source semantic graph that comprehensively characterize the multi-source (i.e., caption, object meta-data, external knowledge) semantic relations to facilitate the sarcasm reasoning. Extensive experiments on a public released dataset MORE verify the superiority of our model over cutting-edge methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.16650v1-abstract-full').style.display = 'none'; document.getElementById('2306.16650v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ACL 2023 main conference</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ACL 2023 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.05236">arXiv:2306.05236</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.05236">pdf</a>, <a href="https://arxiv.org/format/2306.05236">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Population-Based Evolutionary Gaming for Unsupervised Person Re-identification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhai%2C+Y">Yunpeng Zhai</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+P">Peixi Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Mengxi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+S">Shiyong Li</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+W">Weiqiang Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+X">Xuesong Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+Y">Yonghong Tian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.05236v1-abstract-short" style="display: inline;"> Unsupervised person re-identification has achieved great success through the self-improvement of individual neural networks. However, limited by the lack of diversity of discriminant information, a single network has difficulty learning sufficient discrimination ability by itself under unsupervised conditions. To address this limit, we develop a population-based evolutionary gaming (PEG) framework&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.05236v1-abstract-full').style.display = 'inline'; document.getElementById('2306.05236v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.05236v1-abstract-full" style="display: none;"> Unsupervised person re-identification has achieved great success through the self-improvement of individual neural networks. However, limited by the lack of diversity of discriminant information, a single network has difficulty learning sufficient discrimination ability by itself under unsupervised conditions. To address this limit, we develop a population-based evolutionary gaming (PEG) framework in which a population of diverse neural networks is trained concurrently through selection, reproduction, mutation, and population mutual learning iteratively. Specifically, the selection of networks to preserve is modeled as a cooperative game and solved by the best-response dynamics, then the reproduction and mutation are implemented by cloning and fluctuating hyper-parameters of networks to learn more diversity, and population mutual learning improves the discrimination of networks by knowledge distillation from each other within the population. In addition, we propose a cross-reference scatter (CRS) to approximately evaluate re-ID models without labeled samples and adopt it as the criterion of network selection in PEG. CRS measures a model&#39;s performance by indirectly estimating the accuracy of its predicted pseudo-labels according to the cohesion and separation of the feature space. Extensive experiments demonstrate that (1) CRS approximately measures the performance of models without labeled samples; (2) and PEG produces new state-of-the-art accuracy for person re-identification, indicating the great potential of population-based network cooperative training for unsupervised learning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.05236v1-abstract-full').style.display = 'none'; document.getElementById('2306.05236v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in IJCV</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.03881">arXiv:2306.03881</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.03881">pdf</a>, <a href="https://arxiv.org/format/2306.03881">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Emergent Correspondence from Image Diffusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tang%2C+L">Luming Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Menglin Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Q">Qianqian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Phoo%2C+C+P">Cheng Perng Phoo</a>, <a href="/search/cs?searchtype=author&amp;query=Hariharan%2C+B">Bharath Hariharan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.03881v2-abstract-short" style="display: inline;"> Finding correspondences between images is a fundamental problem in computer vision. In this paper, we show that correspondence emerges in image diffusion models without any explicit supervision. We propose a simple strategy to extract this implicit knowledge out of diffusion networks as image features, namely DIffusion FeaTures (DIFT), and use them to establish correspondences between real images.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.03881v2-abstract-full').style.display = 'inline'; document.getElementById('2306.03881v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.03881v2-abstract-full" style="display: none;"> Finding correspondences between images is a fundamental problem in computer vision. In this paper, we show that correspondence emerges in image diffusion models without any explicit supervision. We propose a simple strategy to extract this implicit knowledge out of diffusion networks as image features, namely DIffusion FeaTures (DIFT), and use them to establish correspondences between real images. Without any additional fine-tuning or supervision on the task-specific data or annotations, DIFT is able to outperform both weakly-supervised methods and competitive off-the-shelf features in identifying semantic, geometric, and temporal correspondences. Particularly for semantic correspondence, DIFT from Stable Diffusion is able to outperform DINO and OpenCLIP by 19 and 14 accuracy points respectively on the challenging SPair-71k benchmark. It even outperforms the state-of-the-art supervised methods on 9 out of 18 categories while remaining on par for the overall performance. Project page: https://diffusionfeatures.github.io <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.03881v2-abstract-full').style.display = 'none'; document.getElementById('2306.03881v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS 2023. Project page: https://diffusionfeatures.github.io</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.07223">arXiv:2303.07223</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.07223">pdf</a>, <a href="https://arxiv.org/format/2303.07223">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> PromptFusion: Decoupling Stability and Plasticity for Continual Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Haoran Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zuxuan Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+X">Xintong Han</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+M">Menglin Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Y">Yu-Gang Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.07223v2-abstract-short" style="display: inline;"> Current research on continual learning mainly focuses on relieving catastrophic forgetting, and most of their success is at the cost of limiting the performance of newly incoming tasks. Such a trade-off is referred to as the stability-plasticity dilemma and is a more general and challenging problem for continual learning. However, the inherent conflict between these two concepts makes it seemingly&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.07223v2-abstract-full').style.display = 'inline'; document.getElementById('2303.07223v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.07223v2-abstract-full" style="display: none;"> Current research on continual learning mainly focuses on relieving catastrophic forgetting, and most of their success is at the cost of limiting the performance of newly incoming tasks. Such a trade-off is referred to as the stability-plasticity dilemma and is a more general and challenging problem for continual learning. However, the inherent conflict between these two concepts makes it seemingly impossible to devise a satisfactory solution to both of them simultaneously. Therefore, we ask, &#34;is it possible to divide them into two separate problems to conquer them independently?&#34;. To this end, we propose a prompt-tuning-based method termed PromptFusion to enable the decoupling of stability and plasticity. Specifically, PromptFusion consists of a carefully designed \stab module that deals with catastrophic forgetting and a \boo module to learn new knowledge concurrently. Furthermore, to address the computational overhead brought by the additional architecture, we propose PromptFusion-Lite which improves PromptFusion by dynamically determining whether to activate both modules for each input image. Extensive experiments show that both PromptFusion and PromptFusion-Lite achieve promising results on popular continual learning datasets for class-incremental and domain-incremental settings. Especially on Split-Imagenet-R, one of the most challenging datasets for class-incremental learning, our method can exceed state-of-the-art prompt-based methods by more than 5\% in accuracy, with PromptFusion-Lite using 14.8\% less computational resources than PromptFusion. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.07223v2-abstract-full').style.display = 'none'; document.getElementById('2303.07223v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ECCV 2024 camera-ready version</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Jia%2C+M&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Jia%2C+M&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Jia%2C+M&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10