CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 5,417 results for author: <span class="mathjax">Wang, H</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Wang%2C+H">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Wang, H"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Wang%2C+H&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Wang, H"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14405">arXiv:2411.14405</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14405">pdf</a>, <a href="https://arxiv.org/format/2411.14405">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Marco-o1: Towards Open Reasoning Models for Open-Ended Solutions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yu Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+H">Huifeng Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Zeng%2C+B">Bo Zeng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+T">Tianqi Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+C">Chenyang Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Longyue Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+W">Weihua Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+K">Kaifu Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14405v1-abstract-short" style="display: inline;"> Currently OpenAI o1 has sparked a surge of interest in the study of large reasoning models (LRM). Building on this momentum, Marco-o1 not only focuses on disciplines with standard answers, such as mathematics, physics, and coding -- which are well-suited for reinforcement learning (RL) -- but also places greater emphasis on open-ended resolutions. We aim to address the question: &#34;Can the o1 model&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14405v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14405v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14405v1-abstract-full" style="display: none;"> Currently OpenAI o1 has sparked a surge of interest in the study of large reasoning models (LRM). Building on this momentum, Marco-o1 not only focuses on disciplines with standard answers, such as mathematics, physics, and coding -- which are well-suited for reinforcement learning (RL) -- but also places greater emphasis on open-ended resolutions. We aim to address the question: &#34;Can the o1 model effectively generalize to broader domains where clear standards are absent and rewards are challenging to quantify?&#34; Marco-o1 is powered by Chain-of-Thought (CoT) fine-tuning, Monte Carlo Tree Search (MCTS), reflection mechanisms, and innovative reasoning strategies -- optimized for complex real-world problem-solving tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14405v1-abstract-full').style.display = 'none'; document.getElementById('2411.14405v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14002">arXiv:2411.14002</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14002">pdf</a>, <a href="https://arxiv.org/format/2411.14002">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SEMPose: A Single End-to-end Network for Multi-object Pose Estimation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xin Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+S">Shibei Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+D">Dezong Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14002v1-abstract-short" style="display: inline;"> In computer vision, estimating the six-degree-of-freedom pose from an RGB image is a fundamental task. However, this task becomes highly challenging in multi-object scenes. Currently, the best methods typically employ an indirect strategy, which identifies 2D and 3D correspondences, and then solves with the Perspective-n-Points method. Yet, this approach cannot be trained end-to-end. Direct method&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14002v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14002v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14002v1-abstract-full" style="display: none;"> In computer vision, estimating the six-degree-of-freedom pose from an RGB image is a fundamental task. However, this task becomes highly challenging in multi-object scenes. Currently, the best methods typically employ an indirect strategy, which identifies 2D and 3D correspondences, and then solves with the Perspective-n-Points method. Yet, this approach cannot be trained end-to-end. Direct methods, on the other hand, suffer from lower accuracy due to challenges such as varying object sizes and occlusions. To address these issues, we propose SEMPose, an end-to-end multi-object pose estimation network. SEMPose utilizes a well-designed texture-shape guided feature pyramid network, effectively tackling the challenge of object size variations. Additionally, it employs an iterative refinement head structure, progressively regressing rotation and translation separately to enhance estimation accuracy. During training, we alleviate the impact of occlusion by selecting positive samples from visible parts. Experimental results demonstrate that SEMPose can perform inference at 32 FPS without requiring inputs other than the RGB image. It can accurately estimate the poses of multiple objects in real time, with inference time unaffected by the number of target objects. On the LM-O and YCB-V datasets, our method outperforms other RGB-based single-model methods, achieving higher accuracy. Even when compared with multi-model methods and approaches that use additional refinement, our results remain competitive. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14002v1-abstract-full').style.display = 'none'; document.getElementById('2411.14002v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13868">arXiv:2411.13868</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13868">pdf</a>, <a href="https://arxiv.org/format/2411.13868">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Statistics Theory">math.ST</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Methodology">stat.ME</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Robust Detection of Watermarks for Large Language Models Under Human Edits </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xiang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Ruan%2C+F">Feng Ruan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Huiyuan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Long%2C+Q">Qi Long</a>, <a href="/search/cs?searchtype=author&amp;query=Su%2C+W+J">Weijie J. Su</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13868v1-abstract-short" style="display: inline;"> Watermarking has offered an effective approach to distinguishing text generated by large language models (LLMs) from human-written text. However, the pervasive presence of human edits on LLM-generated text dilutes watermark signals, thereby significantly degrading detection performance of existing methods. In this paper, by modeling human edits through mixture model detection, we introduce a new m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13868v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13868v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13868v1-abstract-full" style="display: none;"> Watermarking has offered an effective approach to distinguishing text generated by large language models (LLMs) from human-written text. However, the pervasive presence of human edits on LLM-generated text dilutes watermark signals, thereby significantly degrading detection performance of existing methods. In this paper, by modeling human edits through mixture model detection, we introduce a new method in the form of a truncated goodness-of-fit test for detecting watermarked text under human edits, which we refer to as Tr-GoF. We prove that the Tr-GoF test achieves optimality in robust detection of the Gumbel-max watermark in a certain asymptotic regime of substantial text modifications and vanishing watermark signals. Importantly, Tr-GoF achieves this optimality \textit{adaptively} as it does not require precise knowledge of human edit levels or probabilistic specifications of the LLMs, in contrast to the optimal but impractical (Neyman--Pearson) likelihood ratio test. Moreover, we establish that the Tr-GoF test attains the highest detection efficiency rate in a certain regime of moderate text modifications. In stark contrast, we show that sum-based detection rules, as employed by existing methods, fail to achieve optimal robustness in both regimes because the additive nature of their statistics is less resilient to edit-induced noise. Finally, we demonstrate the competitive and sometimes superior empirical performance of the Tr-GoF test on both synthetic data and open-source LLMs in the OPT and LLaMA families. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13868v1-abstract-full').style.display = 'none'; document.getElementById('2411.13868v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13785">arXiv:2411.13785</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13785">pdf</a>, <a href="https://arxiv.org/ps/2411.13785">ps</a>, <a href="https://arxiv.org/format/2411.13785">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Throughput Maximization for Movable Antenna Systems with Movement Delay Consideration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Honghao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Q">Qingqing Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+Y">Ying Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+W">Wen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Mei%2C+W">Weidong Mei</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+G">Guojie Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+L">Lexi Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13785v1-abstract-short" style="display: inline;"> In this paper, we model the minimum achievable throughput within a transmission block of restricted duration and aim to maximize it in movable antenna (MA)-enabled multiuser downlink communications. Particularly, we account for the antenna moving delay caused by mechanical movement, which has not been fully considered in previous studies, and reveal the trade-off between the delay and signal-to-in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13785v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13785v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13785v1-abstract-full" style="display: none;"> In this paper, we model the minimum achievable throughput within a transmission block of restricted duration and aim to maximize it in movable antenna (MA)-enabled multiuser downlink communications. Particularly, we account for the antenna moving delay caused by mechanical movement, which has not been fully considered in previous studies, and reveal the trade-off between the delay and signal-to-interference-plus-noise ratio at users. To this end, we first consider a single-user setup to analyze the necessity of antenna movement. By quantizing the virtual angles of arrival, we derive the requisite region size for antenna moving, design the initial MA position, and elucidate the relationship between quantization resolution and moving region size. Furthermore, an efficient algorithm is developed to optimize MA position via successive convex approximation, which is subsequently extended to the general multiuser setup. Numerical results demonstrate that the proposed algorithms outperform fixed-position antenna schemes and existing ones without consideration of movement delay. Additionally, our algorithms exhibit excellent adaptability and stability across various transmission block durations and moving region sizes, and are robust to different antenna moving speeds. This allows the hardware cost of MA-aided systems to be reduced by employing low rotational speed motors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13785v1-abstract-full').style.display = 'none'; document.getElementById('2411.13785v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13584">arXiv:2411.13584</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13584">pdf</a>, <a href="https://arxiv.org/format/2411.13584">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> AddrLLM: Address Rewriting via Large Language Model on Nationwide Logistics Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Q">Qinchen Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+Z">Zhiqing Hong</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+D">Dongjiang Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haotian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Z">Zejun Xie</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+T">Tian He</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yunhuai Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yu Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Desheng Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13584v1-abstract-short" style="display: inline;"> Textual description of a physical location, commonly known as an address, plays an important role in location-based services(LBS) such as on-demand delivery and navigation. However, the prevalence of abnormal addresses, those containing inaccuracies that fail to pinpoint a location, have led to significant costs. Address rewriting has emerged as a solution to rectify these abnormal addresses. Desp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13584v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13584v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13584v1-abstract-full" style="display: none;"> Textual description of a physical location, commonly known as an address, plays an important role in location-based services(LBS) such as on-demand delivery and navigation. However, the prevalence of abnormal addresses, those containing inaccuracies that fail to pinpoint a location, have led to significant costs. Address rewriting has emerged as a solution to rectify these abnormal addresses. Despite the critical need, existing address rewriting methods are limited, typically tailored to correct specific error types, or frequently require retraining to process new address data effectively. In this study, we introduce AddrLLM, an innovative framework for address rewriting that is built upon a retrieval augmented large language model. AddrLLM overcomes aforementioned limitations through a meticulously designed Supervised Fine-Tuning module, an Address-centric Retrieval Augmented Generation module and a Bias-free Objective Alignment module. To the best of our knowledge, this study pioneers the application of LLM-based address rewriting approach to solve the issue of abnormal addresses. Through comprehensive offline testing with real-world data on a national scale and subsequent online deployment, AddrLLM has demonstrated superior performance in integration with existing logistics system. It has significantly decreased the rate of parcel re-routing by approximately 43\%, underscoring its exceptional efficacy in real-world applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13584v1-abstract-full').style.display = 'none'; document.getElementById('2411.13584v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by KDD&#39;25 ADS Track</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13582">arXiv:2411.13582</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13582">pdf</a>, <a href="https://arxiv.org/format/2411.13582">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.neucom.2024.128848">10.1016/j.neucom.2024.128848 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Deep Feature Response Discriminative Calibration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+W">Wenxiang Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Qiu%2C+T">Tian Qiu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Linyun Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+Z">Zunlei Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+M">Mingli Song</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Huiqiong Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13582v1-abstract-short" style="display: inline;"> Deep neural networks (DNNs) have numerous applications across various domains. Several optimization techniques, such as ResNet and SENet, have been proposed to improve model accuracy. These techniques improve the model performance by adjusting or calibrating feature responses according to a uniform standard. However, they lack the discriminative calibration for different features, thereby introduc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13582v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13582v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13582v1-abstract-full" style="display: none;"> Deep neural networks (DNNs) have numerous applications across various domains. Several optimization techniques, such as ResNet and SENet, have been proposed to improve model accuracy. These techniques improve the model performance by adjusting or calibrating feature responses according to a uniform standard. However, they lack the discriminative calibration for different features, thereby introducing limitations in the model output. Therefore, we propose a method that discriminatively calibrates feature responses. The preliminary experimental results indicate that the neural feature response follows a Gaussian distribution. Consequently, we compute confidence values by employing the Gaussian probability density function, and then integrate these values with the original response values. The objective of this integration is to improve the feature discriminability of the neural feature response. Based on the calibration values, we propose a plugin-based calibration module incorporated into a modified ResNet architecture, termed Response Calibration Networks (ResCNet). Extensive experiments on datasets like CIFAR-10, CIFAR-100, SVHN, and ImageNet demonstrate the effectiveness of the proposed approach. The developed code is publicly available at https://github.com/tcmyxc/ResCNet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13582v1-abstract-full').style.display = 'none'; document.getElementById('2411.13582v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Neurocomputing 2025 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13577">arXiv:2411.13577</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13577">pdf</a>, <a href="https://arxiv.org/format/2411.13577">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> WavChat: A Survey of Spoken Dialogue Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ji%2C+S">Shengpeng Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yifu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+M">Minghui Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Zuo%2C+J">Jialong Zuo</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jingyu Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hanting Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Ziyue Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Long Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shujie Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+X">Xize Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+X">Xiaoda Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zehan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Q">Qian Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+J">Jian Li</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Y">Yidi Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+J">Jingzhen He</a>, <a href="/search/cs?searchtype=author&amp;query=Chu%2C+Y">Yunfei Chu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+J">Jin Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Z">Zhou Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13577v1-abstract-short" style="display: inline;"> Recent advancements in spoken dialogue models, exemplified by systems like GPT-4o, have captured significant attention in the speech domain. Compared to traditional three-tier cascaded spoken dialogue models that comprise speech recognition (ASR), large language models (LLMs), and text-to-speech (TTS), modern spoken dialogue models exhibit greater intelligence. These advanced spoken dialogue model&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13577v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13577v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13577v1-abstract-full" style="display: none;"> Recent advancements in spoken dialogue models, exemplified by systems like GPT-4o, have captured significant attention in the speech domain. Compared to traditional three-tier cascaded spoken dialogue models that comprise speech recognition (ASR), large language models (LLMs), and text-to-speech (TTS), modern spoken dialogue models exhibit greater intelligence. These advanced spoken dialogue models not only comprehend audio, music, and other speech-related features, but also capture stylistic and timbral characteristics in speech. Moreover, they generate high-quality, multi-turn speech responses with low latency, enabling real-time interaction through simultaneous listening and speaking capability. Despite the progress in spoken dialogue systems, there is a lack of comprehensive surveys that systematically organize and analyze these systems and the underlying technologies. To address this, we have first compiled existing spoken dialogue systems in the chronological order and categorized them into the cascaded and end-to-end paradigms. We then provide an in-depth overview of the core technologies in spoken dialogue models, covering aspects such as speech representation, training paradigm, streaming, duplex, and interaction capabilities. Each section discusses the limitations of these technologies and outlines considerations for future research. Additionally, we present a thorough review of relevant datasets, evaluation metrics, and benchmarks from the perspectives of training and evaluating spoken dialogue systems. We hope this survey will contribute to advancing both academic research and industrial applications in the field of spoken dialogue systems. The related material is available at https://github.com/jishengpeng/WavChat. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13577v1-abstract-full').style.display = 'none'; document.getElementById('2411.13577v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">60 papes, working in progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13547">arXiv:2411.13547</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13547">pdf</a>, <a href="https://arxiv.org/format/2411.13547">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> SpecTool: A Benchmark for Characterizing Errors in Tool-Use LLMs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kokane%2C+S">Shirley Kokane</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+M">Ming Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Awalgaonkar%2C+T">Tulika Awalgaonkar</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jianguo Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Hoang%2C+T">Thai Hoang</a>, <a href="/search/cs?searchtype=author&amp;query=Prabhakar%2C+A">Akshara Prabhakar</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zuxin Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Lan%2C+T">Tian Lan</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+L">Liangwei Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+J">Juntao Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Murthy%2C+R">Rithesh Murthy</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+W">Weiran Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhiwei Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Niebles%2C+J+C">Juan Carlos Niebles</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Huan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Heinecke%2C+S">Shelby Heinecke</a>, <a href="/search/cs?searchtype=author&amp;query=Xiong%2C+C">Caiming Xiong</a>, <a href="/search/cs?searchtype=author&amp;query=Savarese%2C+S">Silivo Savarese</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13547v1-abstract-short" style="display: inline;"> Evaluating the output of Large Language Models (LLMs) is one of the most critical aspects of building a performant compound AI system. Since the output from LLMs propagate to downstream steps, identifying LLM errors is crucial to system performance. A common task for LLMs in AI systems is tool use. While there are several benchmark environments for evaluating LLMs on this task, they typically only&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13547v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13547v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13547v1-abstract-full" style="display: none;"> Evaluating the output of Large Language Models (LLMs) is one of the most critical aspects of building a performant compound AI system. Since the output from LLMs propagate to downstream steps, identifying LLM errors is crucial to system performance. A common task for LLMs in AI systems is tool use. While there are several benchmark environments for evaluating LLMs on this task, they typically only give a success rate without any explanation of the failure cases. To solve this problem, we introduce SpecTool, a new benchmark to identify error patterns in LLM output on tool-use tasks. Our benchmark data set comprises of queries from diverse environments that can be used to test for the presence of seven newly characterized error patterns. Using SPECTOOL , we show that even the most prominent LLMs exhibit these error patterns in their outputs. Researchers can use the analysis and insights from SPECTOOL to guide their error mitigation strategies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13547v1-abstract-full').style.display = 'none'; document.getElementById('2411.13547v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13476">arXiv:2411.13476</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13476">pdf</a>, <a href="https://arxiv.org/format/2411.13476">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> When Precision Meets Position: BFloat16 Breaks Down RoPE in Long-Context Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haonan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Q">Qian Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+C">Chao Du</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+T">Tongyao Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+C">Cunxiao Du</a>, <a href="/search/cs?searchtype=author&amp;query=Kawaguchi%2C+K">Kenji Kawaguchi</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+T">Tianyu Pang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13476v1-abstract-short" style="display: inline;"> Extending context window sizes allows large language models (LLMs) to process longer sequences and handle more complex tasks. Rotary Positional Embedding (RoPE) has become the de facto standard due to its relative positional encoding properties that benefit long-context training. However, we observe that using RoPE with BFloat16 format results in numerical issues, causing it to deviate from its in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13476v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13476v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13476v1-abstract-full" style="display: none;"> Extending context window sizes allows large language models (LLMs) to process longer sequences and handle more complex tasks. Rotary Positional Embedding (RoPE) has become the de facto standard due to its relative positional encoding properties that benefit long-context training. However, we observe that using RoPE with BFloat16 format results in numerical issues, causing it to deviate from its intended relative positional encoding, especially in long-context scenarios. This issue arises from BFloat16&#39;s limited precision and accumulates as context length increases, with the first token contributing significantly to this problem. To address this, we develop AnchorAttention, a plug-and-play attention method that alleviates numerical issues caused by BFloat16, improves long-context capabilities, and speeds up training. AnchorAttention reduces unnecessary attention computations, maintains semantic coherence, and boosts computational efficiency by treating the first token as a shared anchor with a consistent position ID, making it visible to all documents within the training context. Experiments on three types of LLMs demonstrate that AnchorAttention significantly improves long-context performance and reduces training time by over 50\% compared to standard full attention mechanisms, while preserving the original LLM&#39;s capabilities on general tasks. Our code is available at https://github.com/haonan3/AnchorContext. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13476v1-abstract-full').style.display = 'none'; document.getElementById('2411.13476v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13040">arXiv:2411.13040</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13040">pdf</a>, <a href="https://arxiv.org/format/2411.13040">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> RobustFormer: Noise-Robust Pre-training for images and videos </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bastola%2C+A">Ashish Bastola</a>, <a href="/search/cs?searchtype=author&amp;query=Luitel%2C+N">Nishant Luitel</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Paudel%2C+D+P">Danda Pani Paudel</a>, <a href="/search/cs?searchtype=author&amp;query=Poudel%2C+R">Roshani Poudel</a>, <a href="/search/cs?searchtype=author&amp;query=Razi%2C+A">Abolfazl Razi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13040v1-abstract-short" style="display: inline;"> While deep learning models are powerful tools that revolutionized many areas, they are also vulnerable to noise as they rely heavily on learning patterns and features from the exact details of the clean data. Transformers, which have become the backbone of modern vision models, are no exception. Current Discrete Wavelet Transforms (DWT) based methods do not benefit from masked autoencoder (MAE) pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13040v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13040v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13040v1-abstract-full" style="display: none;"> While deep learning models are powerful tools that revolutionized many areas, they are also vulnerable to noise as they rely heavily on learning patterns and features from the exact details of the clean data. Transformers, which have become the backbone of modern vision models, are no exception. Current Discrete Wavelet Transforms (DWT) based methods do not benefit from masked autoencoder (MAE) pre-training since the inverse DWT (iDWT) introduced in these approaches is computationally inefficient and lacks compatibility with video inputs in transformer architectures. In this work, we present RobustFormer, a method that overcomes these limitations by enabling noise-robust pre-training for both images and videos; improving the efficiency of DWT-based methods by removing the need for computationally iDWT steps and simplifying the attention mechanism. To our knowledge, the proposed method is the first DWT-based method compatible with video inputs and masked pre-training. Our experiments show that MAE-based pre-training allows us to bypass the iDWT step, greatly reducing computation. Through extensive tests on benchmark datasets, RobustFormer achieves state-of-the-art results for both image and video tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13040v1-abstract-full').style.display = 'none'; document.getElementById('2411.13040v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12930">arXiv:2411.12930</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12930">pdf</a>, <a href="https://arxiv.org/format/2411.12930">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> LEDRO: LLM-Enhanced Design Space Reduction and Optimization for Analog Circuits </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kochar%2C+D+V">Dimple Vijay Kochar</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hanrui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chandrakasan%2C+A">Anantha Chandrakasan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xin Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12930v1-abstract-short" style="display: inline;"> Traditional approaches for designing analog circuits are time-consuming and require significant human expertise. Existing automation efforts using methods like Bayesian Optimization (BO) and Reinforcement Learning (RL) are sub-optimal and costly to generalize across different topologies and technology nodes. In our work, we introduce a novel approach, LEDRO, utilizing Large Language Models (LLMs)&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12930v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12930v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12930v1-abstract-full" style="display: none;"> Traditional approaches for designing analog circuits are time-consuming and require significant human expertise. Existing automation efforts using methods like Bayesian Optimization (BO) and Reinforcement Learning (RL) are sub-optimal and costly to generalize across different topologies and technology nodes. In our work, we introduce a novel approach, LEDRO, utilizing Large Language Models (LLMs) in conjunction with optimization techniques to iteratively refine the design space for analog circuit sizing. LEDRO is highly generalizable compared to other RL and BO baselines, eliminating the need for design annotation or model training for different topologies or technology nodes. We conduct a comprehensive evaluation of our proposed framework and baseline on 22 different Op-Amp topologies across four FinFET technology nodes. Results demonstrate the superior performance of LEDRO as it outperforms our best baseline by an average of 13% FoM improvement with 2.15x speed-up on low complexity Op-Amps and 48% FoM improvement with 1.7x speed-up on high complexity Op-Amps. This highlights LEDRO&#39;s effective performance, efficiency, and generalizability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12930v1-abstract-full').style.display = 'none'; document.getElementById('2411.12930v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12814">arXiv:2411.12814</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12814">pdf</a>, <a href="https://arxiv.org/format/2411.12814">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Interactive Medical Image Segmentation: A Benchmark Dataset and Baseline </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+J">Junlong Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+B">Bin Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+J">Jin Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+G">Guoan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+T">Tianbin Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoyu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+R">Ruoyu Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+H">He Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Junren Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+J">JingWen Li</a>, <a href="/search/cs?searchtype=author&amp;query=Su%2C+Y">Yanzhou Su</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+M">Min Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+J">Junjun He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12814v1-abstract-short" style="display: inline;"> Interactive Medical Image Segmentation (IMIS) has long been constrained by the limited availability of large-scale, diverse, and densely annotated datasets, which hinders model generalization and consistent evaluation across different models. In this paper, we introduce the IMed-361M benchmark dataset, a significant advancement in general IMIS research. First, we collect and standardize over 6.4 m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12814v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12814v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12814v1-abstract-full" style="display: none;"> Interactive Medical Image Segmentation (IMIS) has long been constrained by the limited availability of large-scale, diverse, and densely annotated datasets, which hinders model generalization and consistent evaluation across different models. In this paper, we introduce the IMed-361M benchmark dataset, a significant advancement in general IMIS research. First, we collect and standardize over 6.4 million medical images and their corresponding ground truth masks from multiple data sources. Then, leveraging the strong object recognition capabilities of a vision foundational model, we automatically generated dense interactive masks for each image and ensured their quality through rigorous quality control and granularity management. Unlike previous datasets, which are limited by specific modalities or sparse annotations, IMed-361M spans 14 modalities and 204 segmentation targets, totaling 361 million masks-an average of 56 masks per image. Finally, we developed an IMIS baseline network on this dataset that supports high-quality mask generation through interactive inputs, including clicks, bounding boxes, text prompts, and their combinations. We evaluate its performance on medical image segmentation tasks from multiple perspectives, demonstrating superior accuracy and scalability compared to existing interactive segmentation models. To facilitate research on foundational models in medical computer vision, we release the IMed-361M and model at https://github.com/uni-medical/IMIS-Bench. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12814v1-abstract-full').style.display = 'none'; document.getElementById('2411.12814v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12789">arXiv:2411.12789</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12789">pdf</a>, <a href="https://arxiv.org/format/2411.12789">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Automated 3D Physical Simulation of Open-world Scene with Gaussian Splatting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+H">Haoyu Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+X">Xingyue Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongqiu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zhiyu Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Long%2C+C">Chengjiang Long</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H">Hua Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12789v1-abstract-short" style="display: inline;"> Recent advancements in 3D generation models have opened new possibilities for simulating dynamic 3D object movements and customizing behaviors, yet creating this content remains challenging. Current methods often require manual assignment of precise physical properties for simulations or rely on video generation models to predict them, which is computationally intensive. In this paper, we rethink&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12789v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12789v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12789v1-abstract-full" style="display: none;"> Recent advancements in 3D generation models have opened new possibilities for simulating dynamic 3D object movements and customizing behaviors, yet creating this content remains challenging. Current methods often require manual assignment of precise physical properties for simulations or rely on video generation models to predict them, which is computationally intensive. In this paper, we rethink the usage of multi-modal large language model (MLLM) in physics-based simulation, and present Sim Anything, a physics-based approach that endows static 3D objects with interactive dynamics. We begin with detailed scene reconstruction and object-level 3D open-vocabulary segmentation, progressing to multi-view image in-painting. Inspired by human visual reasoning, we propose MLLM-based Physical Property Perception (MLLM-P3) to predict mean physical properties of objects in a zero-shot manner. Based on the mean values and the object&#39;s geometry, the Material Property Distribution Prediction model (MPDP) model then estimates the full distribution, reformulating the problem as probability distribution estimation to reduce computational costs. Finally, we simulate objects in an open-world scene with particles sampled via the Physical-Geometric Adaptive Sampling (PGAS) strategy, efficiently capturing complex deformations and significantly reducing computational costs. Extensive experiments and user studies demonstrate our Sim Anything achieves more realistic motion than state-of-the-art methods within 2 minutes on a single GPU. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12789v1-abstract-full').style.display = 'none'; document.getElementById('2411.12789v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12588">arXiv:2411.12588</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12588">pdf</a>, <a href="https://arxiv.org/format/2411.12588">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Learning To Sample the Meta-Paths for Social Event Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ma%2C+C">Congbo Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Qiu%2C+Z">Zitai Qiu</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+S">Shan Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+J">Jia Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Jian Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Nakov%2C+P">Preslav Nakov</a>, <a href="/search/cs?searchtype=author&amp;query=Sheng%2C+Q+Z">Quan Z. Sheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12588v1-abstract-short" style="display: inline;"> Social media data is inherently rich, as it includes not only text content, but also users, geolocation, entities, temporal information, and their relationships. This data richness can be effectively modeled using heterogeneous information networks (HINs) as it can handle multiple types of nodes and relationships, allowing for a comprehensive representation of complex interactions within social da&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12588v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12588v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12588v1-abstract-full" style="display: none;"> Social media data is inherently rich, as it includes not only text content, but also users, geolocation, entities, temporal information, and their relationships. This data richness can be effectively modeled using heterogeneous information networks (HINs) as it can handle multiple types of nodes and relationships, allowing for a comprehensive representation of complex interactions within social data. Meta-path-based methods use the sequences of relationships between different types of nodes in an HIN to capture the diverse and rich relationships within the social networks. However, the performance of social event detection methods is highly sensitive to the selection of meta-paths and existing meta-path based detectors either rely on human efforts or struggle to determining the effective meta-path set for model training and evaluation. In order to automatically discover the most important meta-paths, we propose a simple, yet effective, end-to-end Learning To Sample (LTS) framework for meta-path searching. Specifically, we build graphs that contain not only user profiles, textual content, and details about entities, but also the intricate relationships among them. The prioritized meta-paths, based on their importance, are sampled from the maintained distribution and their features are constructed before feeding into the social event detector. After picking up the top-ranked meta-paths, we streamline the exponential increment of meta-path combinations into a finite set of highly influential ones. The chosen meta-paths, along with their respective weights, are then used to train our social event detection model. As an alternative to social event detector training, we further propose an extra non-parametric evaluation process in order to determine the importance of each meta-path, which can further guide the paths sampling during model training. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12588v1-abstract-full').style.display = 'none'; document.getElementById('2411.12588v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12547">arXiv:2411.12547</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12547">pdf</a>, <a href="https://arxiv.org/format/2411.12547">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> S3TU-Net: Structured Convolution and Superpixel Transformer for Lung Nodule Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yuke Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xiang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Y">Yunyu Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xinyi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhenglei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">YuQing Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S+H">Shuo Hong Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12547v1-abstract-short" style="display: inline;"> The irregular and challenging characteristics of lung adenocarcinoma nodules in computed tomography (CT) images complicate staging diagnosis, making accurate segmentation critical for clinicians to extract detailed lesion information. In this study, we propose a segmentation model, S3TU-Net, which integrates multi-dimensional spatial connectors and a superpixel-based visual transformer. S3TU-Net i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12547v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12547v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12547v1-abstract-full" style="display: none;"> The irregular and challenging characteristics of lung adenocarcinoma nodules in computed tomography (CT) images complicate staging diagnosis, making accurate segmentation critical for clinicians to extract detailed lesion information. In this study, we propose a segmentation model, S3TU-Net, which integrates multi-dimensional spatial connectors and a superpixel-based visual transformer. S3TU-Net is built on a multi-view CNN-Transformer hybrid architecture, incorporating superpixel algorithms, structured weighting, and spatial shifting techniques to achieve superior segmentation performance. The model leverages structured convolution blocks (DWF-Conv/D2BR-Conv) to extract multi-scale local features while mitigating overfitting. To enhance multi-scale feature fusion, we introduce the S2-MLP Link, integrating spatial shifting and attention mechanisms at the skip connections. Additionally, the residual-based superpixel visual transformer (RM-SViT) effectively merges global and local features by employing sparse correlation learning and multi-branch attention to capture long-range dependencies, with residual connections enhancing stability and computational efficiency. Experimental results on the LIDC-IDRI dataset demonstrate that S3TU-Net achieves a DSC, precision, and IoU of 89.04%, 90.73%, and 90.70%, respectively. Compared to recent methods, S3TU-Net improves DSC by 4.52% and sensitivity by 3.16%, with other metrics showing an approximate 2% increase. In addition to comparison and ablation studies, we validated the generalization ability of our model on the EPDB private dataset, achieving a DSC of 86.40%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12547v1-abstract-full').style.display = 'none'; document.getElementById('2411.12547v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12276">arXiv:2411.12276</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12276">pdf</a>, <a href="https://arxiv.org/format/2411.12276">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> libcll: an Extendable Python Toolkit for Complementary-Label Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ye%2C+N">Nai-Xuan Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Mai%2C+T">Tan-Ha Mai</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hsiu-Hsuan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+W">Wei-I Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Hsuan-Tien Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12276v1-abstract-short" style="display: inline;"> Complementary-label learning (CLL) is a weakly supervised learning paradigm for multiclass classification, where only complementary labels -- indicating classes an instance does not belong to -- are provided to the learning algorithm. Despite CLL&#39;s increasing popularity, previous studies highlight two main challenges: (1) inconsistent results arising from varied assumptions on complementary label&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12276v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12276v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12276v1-abstract-full" style="display: none;"> Complementary-label learning (CLL) is a weakly supervised learning paradigm for multiclass classification, where only complementary labels -- indicating classes an instance does not belong to -- are provided to the learning algorithm. Despite CLL&#39;s increasing popularity, previous studies highlight two main challenges: (1) inconsistent results arising from varied assumptions on complementary label generation, and (2) high barriers to entry due to the lack of a standardized evaluation platform across datasets and algorithms. To address these challenges, we introduce \texttt{libcll}, an extensible Python toolkit for CLL research. \texttt{libcll} provides a universal interface that supports a wide range of generation assumptions, both synthetic and real-world datasets, and key CLL algorithms. The toolkit is designed to mitigate inconsistencies and streamline the research process, with easy installation, comprehensive usage guides, and quickstart tutorials that facilitate efficient adoption and implementation of CLL techniques. Extensive ablation studies conducted with \texttt{libcll} demonstrate its utility in generating valuable insights to advance future CLL research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12276v1-abstract-full').style.display = 'none'; document.getElementById('2411.12276v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12182">arXiv:2411.12182</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12182">pdf</a>, <a href="https://arxiv.org/format/2411.12182">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Diffusion-Inspired Cold Start with Sufficient Prior in Computerized Adaptive Testing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ma%2C+H">Haiping Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+A">Aoqing Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Changqian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hai Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xingyi Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12182v1-abstract-short" style="display: inline;"> Computerized Adaptive Testing (CAT) aims to select the most appropriate questions based on the examinee&#39;s ability and is widely used in online education. However, existing CAT systems often lack initial understanding of the examinee&#39;s ability, requiring random probing questions. This can lead to poorly matched questions, extending the test duration and negatively impacting the examinee&#39;s mindset,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12182v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12182v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12182v1-abstract-full" style="display: none;"> Computerized Adaptive Testing (CAT) aims to select the most appropriate questions based on the examinee&#39;s ability and is widely used in online education. However, existing CAT systems often lack initial understanding of the examinee&#39;s ability, requiring random probing questions. This can lead to poorly matched questions, extending the test duration and negatively impacting the examinee&#39;s mindset, a phenomenon referred to as the Cold Start with Insufficient Prior (CSIP) task. This issue occurs because CAT systems do not effectively utilize the abundant prior information about the examinee available from other courses on online platforms. These response records, due to the commonality of cognitive states across different knowledge domains, can provide valuable prior information for the target domain. However, no prior work has explored solutions for the CSIP task. In response to this gap, we propose Diffusion Cognitive States TransfeR Framework (DCSR), a novel domain transfer framework based on Diffusion Models (DMs) to address the CSIP task. Specifically, we construct a cognitive state transition bridge between domains, guided by the common cognitive states of examinees, encouraging the model to reconstruct the initial ability state in the target domain. To enrich the expressive power of the generated data, we analyze the causal relationships in the generation process from a causal perspective. Redundant and extraneous cognitive states can lead to limited transfer and negative transfer effects. Our DCSR can seamlessly apply the generated initial ability states in the target domain to existing question selection algorithms, thus improving the cold start performance of the CAT system. Extensive experiments conducted on five real-world datasets demonstrate that DCSR significantly outperforms existing baseline methods in addressing the CSIP task. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12182v1-abstract-full').style.display = 'none'; document.getElementById('2411.12182v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by KDD2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11741">arXiv:2411.11741</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11741">pdf</a>, <a href="https://arxiv.org/format/2411.11741">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> A Bicriterion Concentration Inequality and Prophet Inequalities for $k$-Fold Matroid Unions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Alon%2C+N">Noga Alon</a>, <a href="/search/cs?searchtype=author&amp;query=Gravin%2C+N">Nick Gravin</a>, <a href="/search/cs?searchtype=author&amp;query=Pollner%2C+T">Tristan Pollner</a>, <a href="/search/cs?searchtype=author&amp;query=Rubinstein%2C+A">Aviad Rubinstein</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Weinberg%2C+S+M">S. Matthew Weinberg</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qianfan Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11741v2-abstract-short" style="display: inline;"> We investigate prophet inequalities with competitive ratios approaching $1$, seeking to generalize $k$-uniform matroids. We first show that large girth does not suffice: for all $k$, there exists a matroid of girth $\geq k$ and a prophet inequality instance on that matroid whose optimal competitive ratio is $\frac{1}{2}$. Next, we show $k$-fold matroid unions do suffice: we provide a prophet inequ&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11741v2-abstract-full').style.display = 'inline'; document.getElementById('2411.11741v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11741v2-abstract-full" style="display: none;"> We investigate prophet inequalities with competitive ratios approaching $1$, seeking to generalize $k$-uniform matroids. We first show that large girth does not suffice: for all $k$, there exists a matroid of girth $\geq k$ and a prophet inequality instance on that matroid whose optimal competitive ratio is $\frac{1}{2}$. Next, we show $k$-fold matroid unions do suffice: we provide a prophet inequality with competitive ratio $1-O(\sqrt{\frac{\log k}{k}})$ for any $k$-fold matroid union. Our prophet inequality follows from an online contention resolution scheme. The key technical ingredient in our online contention resolution scheme is a novel bicriterion concentration inequality for arbitrary monotone $1$-Lipschitz functions over independent items which may be of independent interest. Applied to our particular setting, our bicriterion concentration inequality yields &#34;Chernoff-strength&#34; concentration for a $1$-Lipschitz function that is not (approximately) self-bounding. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11741v2-abstract-full').style.display = 'none'; document.getElementById('2411.11741v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in ITCS 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11641">arXiv:2411.11641</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11641">pdf</a>, <a href="https://arxiv.org/format/2411.11641">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> TSINR: Capturing Temporal Continuity via Implicit Neural Representations for Time Series Anomaly Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Mengxuan Li</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+K">Ke Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Hongyang Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Bu%2C+J">Jiajun Bu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongwei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haishuai Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11641v2-abstract-short" style="display: inline;"> Time series anomaly detection aims to identify unusual patterns in data or deviations from systems&#39; expected behavior. The reconstruction-based methods are the mainstream in this task, which learn point-wise representation via unsupervised learning. However, the unlabeled anomaly points in training data may cause these reconstruction-based methods to learn and reconstruct anomalous data, resulting&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11641v2-abstract-full').style.display = 'inline'; document.getElementById('2411.11641v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11641v2-abstract-full" style="display: none;"> Time series anomaly detection aims to identify unusual patterns in data or deviations from systems&#39; expected behavior. The reconstruction-based methods are the mainstream in this task, which learn point-wise representation via unsupervised learning. However, the unlabeled anomaly points in training data may cause these reconstruction-based methods to learn and reconstruct anomalous data, resulting in the challenge of capturing normal patterns. In this paper, we propose a time series anomaly detection method based on implicit neural representation (INR) reconstruction, named TSINR, to address this challenge. Due to the property of spectral bias, TSINR enables prioritizing low-frequency signals and exhibiting poorer performance on high-frequency abnormal data. Specifically, we adopt INR to parameterize time series data as a continuous function and employ a transformer-based architecture to predict the INR of given data. As a result, the proposed TSINR method achieves the advantage of capturing the temporal continuity and thus is more sensitive to discontinuous anomaly data. In addition, we further design a novel form of INR continuous function to learn inter- and intra-channel information, and leverage a pre-trained large language model to amplify the intense fluctuations in anomalies. Extensive experiments demonstrate that TSINR achieves superior overall performance on both univariate and multivariate time series anomaly detection benchmarks compared to other state-of-the-art reconstruction-based methods. Our codes are available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11641v2-abstract-full').style.display = 'none'; document.getElementById('2411.11641v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by SIGKDD 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11562">arXiv:2411.11562</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11562">pdf</a>, <a href="https://arxiv.org/format/2411.11562">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> MSSIDD: A Benchmark for Multi-Sensor Denoising </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mei%2C+S">Shibin Mei</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ni%2C+B">Bingbing Ni</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11562v1-abstract-short" style="display: inline;"> The cameras equipped on mobile terminals employ different sensors in different photograph modes, and the transferability of raw domain denoising models between these sensors is significant but remains sufficient exploration. Industrial solutions either develop distinct training strategies and models for different sensors or ignore the differences between sensors and simply extend existing models t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11562v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11562v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11562v1-abstract-full" style="display: none;"> The cameras equipped on mobile terminals employ different sensors in different photograph modes, and the transferability of raw domain denoising models between these sensors is significant but remains sufficient exploration. Industrial solutions either develop distinct training strategies and models for different sensors or ignore the differences between sensors and simply extend existing models to new sensors, which leads to tedious training or unsatisfactory performance. In this paper, we introduce a new benchmark, the Multi-Sensor SIDD (MSSIDD) dataset, which is the first raw-domain dataset designed to evaluate the sensor transferability of denoising models. The MSSIDD dataset consists of 60,000 raw images of six distinct sensors, derived through the degeneration of sRGB images via different camera sensor parameters. Furthermore, we propose a sensor consistency training framework that enables denoising models to learn the sensor-invariant features, thereby facilitating the generalization of the consistent model to unseen sensors. We evaluate previous arts on the newly proposed MSSIDD dataset, and the experimental results validate the effectiveness of our proposed method. Our dataset is available at https://www.kaggle.com/datasets/sjtuwh/mssidd. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11562v1-abstract-full').style.display = 'none'; document.getElementById('2411.11562v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages,7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11532">arXiv:2411.11532</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11532">pdf</a>, <a href="https://arxiv.org/format/2411.11532">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> A Code Knowledge Graph-Enhanced System for LLM-Based Fuzz Driver Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+H">Hanxiang Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+W">Wei Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+T">Ting Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yanjie Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+K">Kai Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Q">Qiang Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoyu Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11532v1-abstract-short" style="display: inline;"> The rapid development of large language models (LLMs) with advanced programming capabilities has paved the way for innovative approaches in software testing. Fuzz testing, a cornerstone for improving software reliability and detecting vulnerabilities, often relies on manually written fuzz drivers, limiting scalability and efficiency. To address this challenge, we propose CodeGraphGPT, a novel syst&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11532v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11532v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11532v1-abstract-full" style="display: none;"> The rapid development of large language models (LLMs) with advanced programming capabilities has paved the way for innovative approaches in software testing. Fuzz testing, a cornerstone for improving software reliability and detecting vulnerabilities, often relies on manually written fuzz drivers, limiting scalability and efficiency. To address this challenge, we propose CodeGraphGPT, a novel system that integrates code knowledge graphs with an LLM-powered intelligent agent to automate the fuzz driver generation process. By framing fuzz driver creation as a code generation task, CodeGraphGPT leverages program analysis to construct a knowledge graph of code repositories, where nodes represent code entities, such as functions or files, and edges capture their relationships. This enables the system to generate tailored fuzz drivers and input seeds, resolve compilation errors, and analyze crash reports, all while adapting to specific API usage scenarios. Additionally, querying the knowledge graph helps identify precise testing targets and contextualize the purpose of each fuzz driver within the fuzzing loop. We evaluated CodeGraphGPT on eight open-source software projects, achieving an average improvement of 8.73\% in code coverage compared to state-of-the-art methods. Moreover, it reduced the manual workload in crash case analysis by 84.4\% and identified 11 real-world bugs, including nine previously unreported ones. This work highlights how integrating LLMs with code knowledge graphs enhances fuzz driver generation, offering an efficient solution for vulnerability detection and software quality improvement. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11532v1-abstract-full').style.display = 'none'; document.getElementById('2411.11532v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11448">arXiv:2411.11448</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11448">pdf</a>, <a href="https://arxiv.org/format/2411.11448">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Unveiling the Inflexibility of Adaptive Embedding in Traffic Forecasting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongjun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jiyuan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lingyu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+R">Renhe Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+X">Xuan Song</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11448v1-abstract-short" style="display: inline;"> Spatiotemporal Graph Neural Networks (ST-GNNs) and Transformers have shown significant promise in traffic forecasting by effectively modeling temporal and spatial correlations. However, rapid urbanization in recent years has led to dynamic shifts in traffic patterns and travel demand, posing major challenges for accurate long-term traffic prediction. The generalization capability of ST-GNNs in ext&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11448v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11448v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11448v1-abstract-full" style="display: none;"> Spatiotemporal Graph Neural Networks (ST-GNNs) and Transformers have shown significant promise in traffic forecasting by effectively modeling temporal and spatial correlations. However, rapid urbanization in recent years has led to dynamic shifts in traffic patterns and travel demand, posing major challenges for accurate long-term traffic prediction. The generalization capability of ST-GNNs in extended temporal scenarios and cross-city applications remains largely unexplored. In this study, we evaluate state-of-the-art models on an extended traffic benchmark and observe substantial performance degradation in existing ST-GNNs over time, which we attribute to their limited inductive capabilities. Our analysis reveals that this degradation stems from an inability to adapt to evolving spatial relationships within urban environments. To address this limitation, we reconsider the design of adaptive embeddings and propose a Principal Component Analysis (PCA) embedding approach that enables models to adapt to new scenarios without retraining. We incorporate PCA embeddings into existing ST-GNN and Transformer architectures, achieving marked improvements in performance. Notably, PCA embeddings allow for flexibility in graph structures between training and testing, enabling models trained on one city to perform zero-shot predictions on other cities. This adaptability demonstrates the potential of PCA embeddings in enhancing the robustness and generalization of spatiotemporal models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11448v1-abstract-full').style.display = 'none'; document.getElementById('2411.11448v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11340">arXiv:2411.11340</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11340">pdf</a>, <a href="https://arxiv.org/format/2411.11340">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> A Hybrid Loss Framework for Decomposition-based Time Series Forecasting Methods: Balancing Global and Component Errors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Han%2C+R">Ronghui Han</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+D">Duanyu Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+H">Hongyu Du</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11340v1-abstract-short" style="display: inline;"> Accurate time series forecasting, predicting future values based on past data, is crucial for diverse industries. Many current time series methods decompose time series into multiple sub-series, applying different model architectures and training with an end-to-end overall loss for forecasting. However, this raises a question: does this overall loss prioritize the importance of critical sub-series&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11340v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11340v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11340v1-abstract-full" style="display: none;"> Accurate time series forecasting, predicting future values based on past data, is crucial for diverse industries. Many current time series methods decompose time series into multiple sub-series, applying different model architectures and training with an end-to-end overall loss for forecasting. However, this raises a question: does this overall loss prioritize the importance of critical sub-series within the decomposition for the better performance? To investigate this, we conduct a study on the impact of overall loss on existing time series methods with sequence decomposition. Our findings reveal that overall loss may introduce bias in model learning, hindering the learning of the prioritization of more significant sub-series and limiting the forecasting performance. To address this, we propose a hybrid loss framework combining the global and component losses. This framework introduces component losses for each sub-series alongside the original overall loss. It employs a dual min-max algorithm to dynamically adjust weights between the overall loss and component losses, and within component losses. This enables the model to achieve better performance of current time series methods by focusing on more critical sub-series while still maintaining a low overall loss. We integrate our loss framework into several time series methods and evaluate the performance on multiple datasets. Results show an average improvement of 0.5-2% over existing methods without any modifications to the model architectures. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11340v1-abstract-full').style.display = 'none'; document.getElementById('2411.11340v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11335">arXiv:2411.11335</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11335">pdf</a>, <a href="https://arxiv.org/format/2411.11335">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Video-to-Task Learning via Motion-Guided Attention for Few-Shot Action Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Guo%2C+H">Hanyu Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+W">Wanchuan Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Que%2C+S">Suzhou Que</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+K">Kaiwen Du</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+Y">Yan Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hanzi Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11335v1-abstract-short" style="display: inline;"> In recent years, few-shot action recognition has achieved remarkable performance through spatio-temporal relation modeling. Although a wide range of spatial and temporal alignment modules have been proposed, they primarily address spatial or temporal misalignments at the video level, while the spatio-temporal relationships across different videos at the task level remain underexplored. Recent stud&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11335v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11335v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11335v1-abstract-full" style="display: none;"> In recent years, few-shot action recognition has achieved remarkable performance through spatio-temporal relation modeling. Although a wide range of spatial and temporal alignment modules have been proposed, they primarily address spatial or temporal misalignments at the video level, while the spatio-temporal relationships across different videos at the task level remain underexplored. Recent studies utilize class prototypes to learn task-specific features but overlook the spatio-temporal relationships across different videos at the task level, especially in the spatial dimension, where these relationships provide rich information. In this paper, we propose a novel Dual Motion-Guided Attention Learning method (called DMGAL) for few-shot action recognition, aiming to learn the spatio-temporal relationships from the video-specific to the task-specific level. To achieve this, we propose a carefully designed Motion-Guided Attention (MGA) method to identify and correlate motion-related region features from the video level to the task level. Specifically, the Self Motion-Guided Attention module (S-MGA) achieves spatio-temporal relation modeling at the video level by identifying and correlating motion-related region features between different frames within a video. The Cross Motion-Guided Attention module (C-MGA) identifies and correlates motion-related region features between frames of different videos within a specific task to achieve spatio-temporal relationships at the task level. This approach enables the model to construct class prototypes that fully incorporate spatio-temporal relationships from the video-specific level to the task-specific level. We validate the effectiveness of our DMGAL method by employing both fully fine-tuning and adapter-tuning paradigms. The models developed using these paradigms are termed DMGAL-FT and DMGAL-Adapter, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11335v1-abstract-full').style.display = 'none'; document.getElementById('2411.11335v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11295">arXiv:2411.11295</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11295">pdf</a>, <a href="https://arxiv.org/format/2411.11295">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Transcending Language Boundaries: Harnessing LLMs for Low-Resource Language Translation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shu%2C+P">Peng Shu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Junhao Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhengliang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zihao Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhong%2C+T">Tianyang Zhong</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yiwei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+H">Huaqin Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+H">Hanqi Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+Y">Yi Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yifan Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Owl%2C+C">Constance Owl</a>, <a href="/search/cs?searchtype=author&amp;query=Zhai%2C+X">Xiaoming Zhai</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+N">Ninghao Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Saunt%2C+C">Claudio Saunt</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+T">Tianming Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11295v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) have demonstrated remarkable success across a wide range of tasks and domains. However, their performance in low-resource language translation, particularly when translating into these languages, remains underexplored. This gap poses significant challenges, as linguistic barriers hinder the cultural preservation and development of minority communities. To address this&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11295v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11295v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11295v1-abstract-full" style="display: none;"> Large Language Models (LLMs) have demonstrated remarkable success across a wide range of tasks and domains. However, their performance in low-resource language translation, particularly when translating into these languages, remains underexplored. This gap poses significant challenges, as linguistic barriers hinder the cultural preservation and development of minority communities. To address this issue, this paper introduces a novel retrieval-based method that enhances translation quality for low-resource languages by focusing on key terms, which involves translating keywords and retrieving corresponding examples from existing data. To evaluate the effectiveness of this method, we conducted experiments translating from English into three low-resource languages: Cherokee, a critically endangered indigenous language of North America; Tibetan, a historically and culturally significant language in Asia; and Manchu, a language with few remaining speakers. Our comparison with the zero-shot performance of GPT-4o and LLaMA 3.1 405B, highlights the significant challenges these models face when translating into low-resource languages. In contrast, our retrieval-based method shows promise in improving both word-level accuracy and overall semantic understanding by leveraging existing resources more effectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11295v1-abstract-full').style.display = 'none'; document.getElementById('2411.11295v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10848">arXiv:2411.10848</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10848">pdf</a>, <a href="https://arxiv.org/format/2411.10848">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Engineering, Finance, and Science">cs.CE</span> </div> </div> <p class="title is-5 mathjax"> NeuroNURBS: Learning Efficient Surface Representations for 3D Solids </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Fan%2C+J">Jiajie Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Gholami%2C+B">Babak Gholami</a>, <a href="/search/cs?searchtype=author&amp;query=B%C3%A4ck%2C+T">Thomas B盲ck</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10848v1-abstract-short" style="display: inline;"> Boundary Representation (B-Rep) is the de facto representation of 3D solids in Computer-Aided Design (CAD). B-Rep solids are defined with a set of NURBS (Non-Uniform Rational B-Splines) surfaces forming a closed volume. To represent a surface, current works often employ the UV-grid approximation, i.e., sample points uniformly on the surface. However, the UV-grid method is not efficient in surface&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10848v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10848v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10848v1-abstract-full" style="display: none;"> Boundary Representation (B-Rep) is the de facto representation of 3D solids in Computer-Aided Design (CAD). B-Rep solids are defined with a set of NURBS (Non-Uniform Rational B-Splines) surfaces forming a closed volume. To represent a surface, current works often employ the UV-grid approximation, i.e., sample points uniformly on the surface. However, the UV-grid method is not efficient in surface representation and sometimes lacks precision and regularity. In this work, we propose NeuroNURBS, a representation learning method to directly encode the parameters of NURBS surfaces. Our evaluation in solid generation and segmentation tasks indicates that the NeuroNURBS performs comparably and, in some cases, superior to UV-grids, but with a significantly improved efficiency: for training the surface autoencoder, GPU consumption is reduced by 86.7%; memory requirement drops by 79.9% for storing 3D solids. Moreover, adapting BrepGen for solid generation with our NeuroNURBS improves the FID from 30.04 to 27.24, and resolves the undulating issue in generated surfaces. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10848v1-abstract-full').style.display = 'none'; document.getElementById('2411.10848v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10831">arXiv:2411.10831</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10831">pdf</a>, <a href="https://arxiv.org/format/2411.10831">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Neighboring Slice Noise2Noise: Self-Supervised Medical Image Denoising from Single Noisy Image Volume </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+L">Langrui Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Z">Ziteng Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+X">Xinyu Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xiangyu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Huiru Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+G">Guang Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10831v1-abstract-short" style="display: inline;"> In the last few years, with the rapid development of deep learning technologies, supervised methods based on convolutional neural networks have greatly enhanced the performance of medical image denoising. However, these methods require large quantities of noisy-clean image pairs for training, which greatly limits their practicality. Although some researchers have attempted to train denoising netwo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10831v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10831v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10831v1-abstract-full" style="display: none;"> In the last few years, with the rapid development of deep learning technologies, supervised methods based on convolutional neural networks have greatly enhanced the performance of medical image denoising. However, these methods require large quantities of noisy-clean image pairs for training, which greatly limits their practicality. Although some researchers have attempted to train denoising networks using only single noisy images, existing self-supervised methods, including blind-spot-based and data-splitting-based methods, heavily rely on the assumption that noise is pixel-wise independent. However, this assumption often does not hold in real-world medical images. Therefore, in the field of medical imaging, there remains a lack of simple and practical denoising methods that can achieve high-quality denoising performance using only single noisy images. In this paper, we propose a novel self-supervised medical image denoising method, Neighboring Slice Noise2Noise (NS-N2N). The proposed method utilizes neighboring slices within a single noisy image volume to construct weighted training data, and then trains the denoising network using a self-supervised scheme with regional consistency loss and inter-slice continuity loss. NS-N2N only requires a single noisy image volume obtained from one medical imaging procedure to achieve high-quality denoising of the image volume itself. Extensive experiments demonstrate that the proposed method outperforms state-of-the-art self-supervised denoising methods in both denoising performance and processing efficiency. Furthermore, since NS-N2N operates solely in the image domain, it is free from device-specific issues such as reconstruction geometry, making it easier to apply in various clinical practices. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10831v1-abstract-full').style.display = 'none'; document.getElementById('2411.10831v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10321">arXiv:2411.10321</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10321">pdf</a>, <a href="https://arxiv.org/format/2411.10321">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Probabilistic Prior Driven Attention Mechanism Based on Diffusion Model for Imaging Through Atmospheric Turbulence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sun%2C+G">Guodong Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Q">Qixiang Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Liqiang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongwei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+Z">Zixuan Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Haotian Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10321v1-abstract-short" style="display: inline;"> Atmospheric turbulence introduces severe spatial and geometric distortions, challenging traditional image restoration methods. We propose the Probabilistic Prior Turbulence Removal Network (PPTRN), which combines probabilistic diffusion-based prior modeling with Transformer-driven feature extraction to address this issue. PPTRN employs a two-stage approach: first, a latent encoder and Transformer&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10321v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10321v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10321v1-abstract-full" style="display: none;"> Atmospheric turbulence introduces severe spatial and geometric distortions, challenging traditional image restoration methods. We propose the Probabilistic Prior Turbulence Removal Network (PPTRN), which combines probabilistic diffusion-based prior modeling with Transformer-driven feature extraction to address this issue. PPTRN employs a two-stage approach: first, a latent encoder and Transformer are jointly trained on clear images to establish robust feature representations. Then, a Denoising Diffusion Probabilistic Model (DDPM) models prior distributions over latent vectors, guiding the Transformer in capturing diverse feature variations essential for restoration. A key innovation in PPTRN is the Probabilistic Prior Driven Cross Attention mechanism, which integrates the DDPM-generated prior with feature embeddings to reduce artifacts and enhance spatial coherence. Extensive experiments validate that PPTRN significantly improves restoration quality on turbulence-degraded images, setting a new benchmark in clarity and structural fidelity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10321v1-abstract-full').style.display = 'none'; document.getElementById('2411.10321v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10280">arXiv:2411.10280</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10280">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> From Score-Driven to Value-Sharing: Understanding Chinese Family Use of AI to Support Decision Making of College Applications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+S">Si Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+J">Jingyi Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+G">Ge Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haizhou Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+H">Haocong Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Y">Yun Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10280v1-abstract-short" style="display: inline;"> This study investigates how 18-year-old students, parents, and experts in China utilize artificial intelligence (AI) tools to support decision-making in college applications during college entrance exam -- a highly competitive, score-driven, annual national exam. Through 32 interviews, we examine the use of Quark GaoKao, an AI tool that generates college application lists and acceptance probabilit&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10280v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10280v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10280v1-abstract-full" style="display: none;"> This study investigates how 18-year-old students, parents, and experts in China utilize artificial intelligence (AI) tools to support decision-making in college applications during college entrance exam -- a highly competitive, score-driven, annual national exam. Through 32 interviews, we examine the use of Quark GaoKao, an AI tool that generates college application lists and acceptance probabilities based on exam scores, historical data, preferred locations, etc. Our findings show that AI tools are predominantly used by parents with limited involvement from students, and often focus on immediate exam results, failing to address long-term career goals. We also identify challenges such as misleading AI recommendations, and irresponsible use of AI by third-party consultant agencies. Finally, we offer design insights to better support multi-stakeholders&#39; decision-making in families, especially in the Chinese context, and discuss how emerging AI tools create barriers for families with fewer resources. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10280v1-abstract-full').style.display = 'none'; document.getElementById('2411.10280v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10261">arXiv:2411.10261</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10261">pdf</a>, <a href="https://arxiv.org/format/2411.10261">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Partial Scene Text Retrieval </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liao%2C+M">Minghui Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Z">Zhouyi Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Wenyu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+X">Xiang Bai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10261v2-abstract-short" style="display: inline;"> The task of partial scene text retrieval involves localizing and searching for text instances that are the same or similar to a given query text from an image gallery. However, existing methods can only handle text-line instances, leaving the problem of searching for partial patches within these text-line instances unsolved due to a lack of patch annotations in the training data. To address this i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10261v2-abstract-full').style.display = 'inline'; document.getElementById('2411.10261v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10261v2-abstract-full" style="display: none;"> The task of partial scene text retrieval involves localizing and searching for text instances that are the same or similar to a given query text from an image gallery. However, existing methods can only handle text-line instances, leaving the problem of searching for partial patches within these text-line instances unsolved due to a lack of patch annotations in the training data. To address this issue, we propose a network that can simultaneously retrieve both text-line instances and their partial patches. Our method embeds the two types of data (query text and scene text instances) into a shared feature space and measures their cross-modal similarities. To handle partial patches, our proposed approach adopts a Multiple Instance Learning (MIL) approach to learn their similarities with query text, without requiring extra annotations. However, constructing bags, which is a standard step of conventional MIL approaches, can introduce numerous noisy samples for training, and lower inference speed. To address this issue, we propose a Ranking MIL (RankMIL) approach to adaptively filter those noisy samples. Additionally, we present a Dynamic Partial Match Algorithm (DPMA) that can directly search for the target partial patch from a text-line instance during the inference stage, without requiring bags. This greatly improves the search efficiency and the performance of retrieving partial patches. The source code and dataset are available at https://github.com/lanfeng4659/PSTR. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10261v2-abstract-full').style.display = 'none'; document.getElementById('2411.10261v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted on TPAMI</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10163">arXiv:2411.10163</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10163">pdf</a>, <a href="https://arxiv.org/format/2411.10163">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Compound-QA: A Benchmark for Evaluating LLMs on Compound Questions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hou%2C+Y">Yutao Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+Y">Yajing Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Ruan%2C+Z">Zhiwen Ruan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongru Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ge%2C+W">Weifeng Ge</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yun Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+G">Guanhua Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10163v1-abstract-short" style="display: inline;"> Large language models (LLMs) demonstrate remarkable performance across various tasks, prompting researchers to develop diverse evaluation benchmarks. However, existing benchmarks typically measure the ability of LLMs to respond to individual questions, neglecting the complex interactions in real-world applications. In this paper, we introduce Compound Question Synthesis (CQ-Syn) to create the Comp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10163v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10163v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10163v1-abstract-full" style="display: none;"> Large language models (LLMs) demonstrate remarkable performance across various tasks, prompting researchers to develop diverse evaluation benchmarks. However, existing benchmarks typically measure the ability of LLMs to respond to individual questions, neglecting the complex interactions in real-world applications. In this paper, we introduce Compound Question Synthesis (CQ-Syn) to create the Compound-QA benchmark, focusing on compound questions with multiple sub-questions. This benchmark is derived from existing QA datasets, annotated with proprietary LLMs and verified by humans for accuracy. It encompasses five categories: Factual-Statement, Cause-and-Effect, Hypothetical-Analysis, Comparison-and-Selection, and Evaluation-and-Suggestion. It evaluates the LLM capability in terms of three dimensions including understanding, reasoning, and knowledge. Our assessment of eight open-source LLMs using Compound-QA reveals distinct patterns in their responses to compound questions, which are significantly poorer than those to non-compound questions. Additionally, we investigate various methods to enhance LLMs performance on compound questions. The results indicate that these approaches significantly improve the models&#39; comprehension and reasoning abilities on compound questions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10163v1-abstract-full').style.display = 'none'; document.getElementById('2411.10163v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09924">arXiv:2411.09924</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09924">pdf</a>, <a href="https://arxiv.org/format/2411.09924">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> A Polarization Image Dehazing Method Based on the Principle of Physical Diffusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhenjun Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+L">Lijun Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongjin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Lilian Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+Y">Yunze He</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yaonan Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09924v1-abstract-short" style="display: inline;"> Computer vision is increasingly used in areas such as unmanned vehicles, surveillance systems and remote sensing. However, in foggy scenarios, image degradation leads to loss of target details, which seriously affects the accuracy and effectiveness of these vision tasks. Polarized light, due to the fact that its electromagnetic waves vibrate in a specific direction, is able to resist scattering an&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09924v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09924v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09924v1-abstract-full" style="display: none;"> Computer vision is increasingly used in areas such as unmanned vehicles, surveillance systems and remote sensing. However, in foggy scenarios, image degradation leads to loss of target details, which seriously affects the accuracy and effectiveness of these vision tasks. Polarized light, due to the fact that its electromagnetic waves vibrate in a specific direction, is able to resist scattering and refraction effects in complex media more effectively compared to unpolarized light. As a result, polarized light has a greater ability to maintain its polarization characteristics in complex transmission media and under long-distance imaging conditions. This property makes polarized imaging especially suitable for complex scenes such as outdoor and underwater, especially in foggy environments, where higher quality images can be obtained. Based on this advantage, we propose an innovative semi-physical polarization dehazing method that does not rely on an external light source. The method simulates the diffusion process of fog and designs a diffusion kernel that corresponds to the image blurriness caused by this diffusion. By employing spatiotemporal Fourier transforms and deconvolution operations, the method recovers the state of fog droplets prior to diffusion and the light inversion distribution of objects. This approach effectively achieves dehazing and detail enhancement of the scene. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09924v1-abstract-full').style.display = 'none'; document.getElementById('2411.09924v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09879">arXiv:2411.09879</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09879">pdf</a>, <a href="https://arxiv.org/format/2411.09879">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> A Multi-Label EEG Dataset for Mental Attention State Classification in Online Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Huan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuzhe Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+G">Guanjian Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+X">Xinxin Du</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haochong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Dalin Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09879v1-abstract-short" style="display: inline;"> Attention is a vital cognitive process in the learning and memory environment, particularly in the context of online learning. Traditional methods for classifying attention states of online learners based on behavioral signals are prone to distortion, leading to increased interest in using electroencephalography (EEG) signals for authentic and accurate assessment. However, the field of attention s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09879v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09879v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09879v1-abstract-full" style="display: none;"> Attention is a vital cognitive process in the learning and memory environment, particularly in the context of online learning. Traditional methods for classifying attention states of online learners based on behavioral signals are prone to distortion, leading to increased interest in using electroencephalography (EEG) signals for authentic and accurate assessment. However, the field of attention state classification based on EEG signals in online learning faces challenges, including the scarcity of publicly available datasets, the lack of standardized data collection paradigms, and the requirement to consider the interplay between attention and other psychological states. In light of this, we present the Multi-label EEG dataset for classifying Mental Attention states (MEMA) in online learning. We meticulously designed a reliable and standard experimental paradigm with three attention states: neutral, relaxing, and concentrating, considering human physiological and psychological characteristics. This paradigm collected EEG signals from 20 subjects, each participating in 12 trials, resulting in 1,060 minutes of data. Emotional state labels, basic personal information, and personality traits were also collected to investigate the relationship between attention and other psychological states. Extensive quantitative and qualitative analysis, including a multi-label correlation study, validated the quality of the EEG attention data. The MEMA dataset and analysis provide valuable insights for advancing research on attention in online learning. The dataset is publicly available at \url{https://github.com/GuanjianLiu/MEMA}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09879v1-abstract-full').style.display = 'none'; document.getElementById('2411.09879v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09547">arXiv:2411.09547</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09547">pdf</a>, <a href="https://arxiv.org/format/2411.09547">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Piecing It All Together: Verifying Multi-Hop Multimodal Claims </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoran Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Rangapur%2C+A">Aman Rangapur</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+X">Xiongxiao Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+Y">Yueqing Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Gharwi%2C+H">Haroon Gharwi</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+C">Carl Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Shu%2C+K">Kai Shu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09547v1-abstract-short" style="display: inline;"> Existing claim verification datasets often do not require systems to perform complex reasoning or effectively interpret multimodal evidence. To address this, we introduce a new task: multi-hop multimodal claim verification. This task challenges models to reason over multiple pieces of evidence from diverse sources, including text, images, and tables, and determine whether the combined multimodal e&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09547v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09547v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09547v1-abstract-full" style="display: none;"> Existing claim verification datasets often do not require systems to perform complex reasoning or effectively interpret multimodal evidence. To address this, we introduce a new task: multi-hop multimodal claim verification. This task challenges models to reason over multiple pieces of evidence from diverse sources, including text, images, and tables, and determine whether the combined multimodal evidence supports or refutes a given claim. To study this task, we construct MMCV, a large-scale dataset comprising 16k multi-hop claims paired with multimodal evidence, generated and refined using large language models, with additional input from human feedback. We show that MMCV is challenging even for the latest state-of-the-art multimodal large language models, especially as the number of reasoning hops increases. Additionally, we establish a human performance benchmark on a subset of MMCV. We hope this dataset and its evaluation task will encourage future research in multimodal multi-hop claim verification. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09547v1-abstract-full').style.display = 'none'; document.getElementById('2411.09547v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09266">arXiv:2411.09266</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09266">pdf</a>, <a href="https://arxiv.org/format/2411.09266">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> How Good is ChatGPT at Audiovisual Deepfake Detection: A Comparative Study of ChatGPT, AI Models and Human Perception </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shahzad%2C+S+A">Sahibzada Adil Shahzad</a>, <a href="/search/cs?searchtype=author&amp;query=Hashmi%2C+A">Ammarah Hashmi</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+Y">Yan-Tsung Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Tsao%2C+Y">Yu Tsao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hsin-Min Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09266v1-abstract-short" style="display: inline;"> Multimodal deepfakes involving audiovisual manipulations are a growing threat because they are difficult to detect with the naked eye or using unimodal deep learningbased forgery detection methods. Audiovisual forensic models, while more capable than unimodal models, require large training datasets and are computationally expensive for training and inference. Furthermore, these models lack interpr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09266v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09266v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09266v1-abstract-full" style="display: none;"> Multimodal deepfakes involving audiovisual manipulations are a growing threat because they are difficult to detect with the naked eye or using unimodal deep learningbased forgery detection methods. Audiovisual forensic models, while more capable than unimodal models, require large training datasets and are computationally expensive for training and inference. Furthermore, these models lack interpretability and often do not generalize well to unseen manipulations. In this study, we examine the detection capabilities of a large language model (LLM) (i.e., ChatGPT) to identify and account for any possible visual and auditory artifacts and manipulations in audiovisual deepfake content. Extensive experiments are conducted on videos from a benchmark multimodal deepfake dataset to evaluate the detection performance of ChatGPT and compare it with the detection capabilities of state-of-the-art multimodal forensic models and humans. Experimental results demonstrate the importance of domain knowledge and prompt engineering for video forgery detection tasks using LLMs. Unlike approaches based on end-to-end learning, ChatGPT can account for spatial and spatiotemporal artifacts and inconsistencies that may exist within or across modalities. Additionally, we discuss the limitations of ChatGPT for multimedia forensic tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09266v1-abstract-full').style.display = 'none'; document.getElementById('2411.09266v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09263">arXiv:2411.09263</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09263">pdf</a>, <a href="https://arxiv.org/format/2411.09263">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Rethinking Weight-Averaged Model-merging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+C">Congbo Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Almakky%2C+I">Ibrahim Almakky</a>, <a href="/search/cs?searchtype=author&amp;query=Reid%2C+I">Ian Reid</a>, <a href="/search/cs?searchtype=author&amp;query=Carneiro%2C+G">Gustavo Carneiro</a>, <a href="/search/cs?searchtype=author&amp;query=Yaqub%2C+M">Mohammad Yaqub</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09263v2-abstract-short" style="display: inline;"> Weight-averaged model-merging has emerged as a powerful approach in deep learning, capable of enhancing model performance without fine-tuning or retraining. However, the underlying mechanisms that explain its effectiveness remain largely unexplored. In this paper, we investigate this technique from three novel perspectives to provide deeper insights into how and why weight-averaged model-merging w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09263v2-abstract-full').style.display = 'inline'; document.getElementById('2411.09263v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09263v2-abstract-full" style="display: none;"> Weight-averaged model-merging has emerged as a powerful approach in deep learning, capable of enhancing model performance without fine-tuning or retraining. However, the underlying mechanisms that explain its effectiveness remain largely unexplored. In this paper, we investigate this technique from three novel perspectives to provide deeper insights into how and why weight-averaged model-merging works: (1) we examine the intrinsic patterns captured by the learning of the model weights, through the visualizations of their patterns on several datasets, showing that these weights often encode structured and interpretable patterns; (2) we investigate model ensemble merging strategies based on averaging on weights versus averaging on features, providing detailed analyses across diverse architectures and datasets; and (3) we explore the impact on model-merging prediction stability in terms of changing the parameter magnitude, revealing insights into the way of weight averaging works as regularization by showing the robustness across different parameter scales. Our findings shed light on the &#34;black box&#34; of weight-averaged model-merging, offering valuable insights and practical recommendations that advance the model-merging process. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09263v2-abstract-full').style.display = 'none'; document.getElementById('2411.09263v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08545">arXiv:2411.08545</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08545">pdf</a>, <a href="https://arxiv.org/format/2411.08545">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> APDDv2: Aesthetics of Paintings and Drawings Dataset with Artist Labeled Scores and Comments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jin%2C+X">Xin Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+Q">Qianqian Qiao</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+Y">Yi Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Huaye Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Heng Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+S">Shan Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jianfei Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+R">Rui Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08545v1-abstract-short" style="display: inline;"> Datasets play a pivotal role in training visual models, facilitating the development of abstract understandings of visual features through diverse image samples and multidimensional attributes. However, in the realm of aesthetic evaluation of artistic images, datasets remain relatively scarce. Existing painting datasets are often characterized by limited scoring dimensions and insufficient annotat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08545v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08545v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08545v1-abstract-full" style="display: none;"> Datasets play a pivotal role in training visual models, facilitating the development of abstract understandings of visual features through diverse image samples and multidimensional attributes. However, in the realm of aesthetic evaluation of artistic images, datasets remain relatively scarce. Existing painting datasets are often characterized by limited scoring dimensions and insufficient annotations, thereby constraining the advancement and application of automatic aesthetic evaluation methods in the domain of painting. To bridge this gap, we introduce the Aesthetics Paintings and Drawings Dataset (APDD), the first comprehensive collection of paintings encompassing 24 distinct artistic categories and 10 aesthetic attributes. Building upon the initial release of APDDv1, our ongoing research has identified opportunities for enhancement in data scale and annotation precision. Consequently, APDDv2 boasts an expanded image corpus and improved annotation quality, featuring detailed language comments to better cater to the needs of both researchers and practitioners seeking high-quality painting datasets. Furthermore, we present an updated version of the Art Assessment Network for Specific Painting Styles, denoted as ArtCLIP. Experimental validation demonstrates the superior performance of this revised model in the realm of aesthetic evaluation, surpassing its predecessor in accuracy and efficacy. The dataset and model are available at https://github.com/BestiVictory/APDDv2.git. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08545v1-abstract-full').style.display = 'none'; document.getElementById('2411.08545v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08521">arXiv:2411.08521</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08521">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> SAD-TIME: a Spatiotemporal-fused network for depression detection with Automated multi-scale Depth-wise and TIME-interval-related common feature extractor </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Han-Guang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+H">Hui-Rang Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+L">Li-Cheng Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+C">Chen-Yang Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhong-Yi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Meng%2C+Q">Qing-Hao Meng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08521v2-abstract-short" style="display: inline;"> Background and Objective: Depression is a severe mental disorder, and accurate diagnosis is pivotal to the cure and rehabilitation of people with depression. However, the current questionnaire-based diagnostic methods could bring subjective biases and may be denied by subjects. In search of a more objective means of diagnosis, researchers have begun to experiment with deep learning-based methods f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08521v2-abstract-full').style.display = 'inline'; document.getElementById('2411.08521v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08521v2-abstract-full" style="display: none;"> Background and Objective: Depression is a severe mental disorder, and accurate diagnosis is pivotal to the cure and rehabilitation of people with depression. However, the current questionnaire-based diagnostic methods could bring subjective biases and may be denied by subjects. In search of a more objective means of diagnosis, researchers have begun to experiment with deep learning-based methods for identifying depressive disorders in recent years. Methods: In this study, a novel Spatiotemporal-fused network with Automated multi-scale Depth-wise and TIME-interval-related common feature extractor (SAD-TIME) is proposed. SAD-TIME incorporates an automated nodes&#39; common features extractor (CFE), a spatial sector (SpS), a modified temporal sector (TeS), and a domain adversarial learner (DAL). The CFE includes a multi-scale depth-wise 1D-convolutional neural network and a time-interval embedding generator, where the unique information of each channel is preserved. The SpS fuses the functional connectivity with the distance-based connectivity containing spatial position of EEG electrodes. A multi-head-attention graph convolutional network is also applied in the SpS to fuse the features from different EEG channels. The TeS is based on long short-term memory and graph transformer networks, where the temporal information of different time-windows is fused. Moreover, the DAL is used after the SpS to obtain the domain-invariant feature. Results: Experimental results under tenfold cross-validation show that the proposed SAD-TIME method achieves 92.00% and 94.00% depression classification accuracies on two datasets, respectively, in cross-subject mode. Conclusion: SAD-TIME is a robust depression detection model, where the automatedly-generated features, the SpS and the TeS assist the classification performance with the fusion of the innate spatiotemporal information in the EEG signals. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08521v2-abstract-full').style.display = 'none'; document.getElementById('2411.08521v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08488">arXiv:2411.08488</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08488">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> UNSCT-HRNet: Modeling Anatomical Uncertainty for Landmark Detection in Total Hip Arthroplasty </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wan%2C+J">Jiaxin Wan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+L">Lin Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoran Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Liangwei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+W">Wei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Kou%2C+S">Shuheng Kou</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+R">Runtian Li</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+J">Jiayi Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Juanxiu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jing Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+X">Xiaohui Du</a>, <a href="/search/cs?searchtype=author&amp;query=Hao%2C+R">Ruqian Hao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08488v1-abstract-short" style="display: inline;"> Total hip arthroplasty (THA) relies on accurate landmark detection from radiographic images, but unstructured data caused by irregular patient postures or occluded anatomical markers pose significant challenges for existing methods. To address this, we propose UNSCT-HRNet (Unstructured CT - High-Resolution Net), a deep learning-based framework that integrates a Spatial Relationship Fusion (SRF) mo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08488v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08488v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08488v1-abstract-full" style="display: none;"> Total hip arthroplasty (THA) relies on accurate landmark detection from radiographic images, but unstructured data caused by irregular patient postures or occluded anatomical markers pose significant challenges for existing methods. To address this, we propose UNSCT-HRNet (Unstructured CT - High-Resolution Net), a deep learning-based framework that integrates a Spatial Relationship Fusion (SRF) module and an Uncertainty Estimation (UE) module. The SRF module, utilizing coordinate convolution and polarized attention, enhances the model&#39;s ability to capture complex spatial relationships. Meanwhile, the UE module which based on entropy ensures predictions are anatomically relevant. For unstructured data, the proposed method can predict landmarks without relying on the fixed number of points, which shows higher accuracy and better robustness comparing with the existing methods. Our UNSCT-HRNet demonstrates over a 60% improvement across multiple metrics in unstructured data. The experimental results also reveal that our approach maintains good performance on the structured dataset. Overall, the proposed UNSCT-HRNet has the potential to be used as a new reliable, automated solution for THA surgical planning and postoperative monitoring. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08488v1-abstract-full').style.display = 'none'; document.getElementById('2411.08488v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08333">arXiv:2411.08333</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08333">pdf</a>, <a href="https://arxiv.org/format/2411.08333">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SASE: A Searching Architecture for Squeeze and Excitation Operations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hanming Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yunlong Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zijun Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Huifen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuan Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08333v1-abstract-short" style="display: inline;"> In the past few years, channel-wise and spatial-wise attention blocks have been widely adopted as supplementary modules in deep neural networks, enhancing network representational abilities while introducing low complexity. Most attention modules follow a squeeze-and-excitation paradigm. However, to design such attention modules, requires a substantial amount of experiments and computational resou&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08333v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08333v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08333v1-abstract-full" style="display: none;"> In the past few years, channel-wise and spatial-wise attention blocks have been widely adopted as supplementary modules in deep neural networks, enhancing network representational abilities while introducing low complexity. Most attention modules follow a squeeze-and-excitation paradigm. However, to design such attention modules, requires a substantial amount of experiments and computational resources. Neural Architecture Search (NAS), meanwhile, is able to automate the design of neural networks and spares the numerous experiments required for an optimal architecture. This motivates us to design a search architecture that can automatically find near-optimal attention modules through NAS. We propose SASE, a Searching Architecture for Squeeze and Excitation operations, to form a plug-and-play attention block by searching within certain search space. The search space is separated into 4 different sets, each corresponds to the squeeze or excitation operation along the channel or spatial dimension. Additionally, the search sets include not only existing attention blocks but also other operations that have not been utilized in attention mechanisms before. To the best of our knowledge, SASE is the first attempt to subdivide the attention search space and search for architectures beyond currently known attention modules. The searched attention module is tested with extensive experiments across a range of visual tasks. Experimental results indicate that visual backbone networks (ResNet-50/101) using the SASE attention module achieved the best performance compared to those using the current state-of-the-art attention modules. Codes are included in the supplementary material, and they will be made public later. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08333v1-abstract-full').style.display = 'none'; document.getElementById('2411.08333v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08294">arXiv:2411.08294</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08294">pdf</a>, <a href="https://arxiv.org/format/2411.08294">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Collaborative Participatory Research with LLM Agents in South Asia: An Empirically-Grounded Methodological Initiative and Agenda from Field Evidence in Sri Lanka </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+X">Xinjie Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Sriwarnasinghe%2C+S+M">Shyaman Maduranga Sriwarnasinghe</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+J">Jiacheng Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shiyun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Morikawa%2C+S">So Morikawa</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08294v1-abstract-short" style="display: inline;"> The integration of artificial intelligence into development research methodologies presents unprecedented opportunities for addressing persistent challenges in participatory research, particularly in linguistically diverse regions like South Asia. Drawing from an empirical implementation in Sri Lanka&#39;s Sinhala-speaking communities, this paper presents an empirically grounded methodological framewo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08294v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08294v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08294v1-abstract-full" style="display: none;"> The integration of artificial intelligence into development research methodologies presents unprecedented opportunities for addressing persistent challenges in participatory research, particularly in linguistically diverse regions like South Asia. Drawing from an empirical implementation in Sri Lanka&#39;s Sinhala-speaking communities, this paper presents an empirically grounded methodological framework designed to transform participatory development research, situated in the challenging multilingual context of Sri Lanka&#39;s flood-prone Nilwala River Basin. Moving beyond conventional translation and data collection tools, this framework deploys a multi-agent system architecture that redefines how data collection, analysis, and community engagement are conducted in linguistically and culturally diverse research settings. This structured agent-based approach enables participatory research that is both scalable and responsive, ensuring that community perspectives remain integral to research outcomes. Field experiences reveal the immense potential of LLM-based systems in addressing long-standing issues in development research across resource-limited regions, offering both quantitative efficiencies and qualitative improvements in inclusivity. At a broader methodological level, this research agenda advocates for AI-driven participatory research tools that maintain ethical considerations, cultural respect, and operational efficiency, highlighting strategic pathways for deploying AI systems that reinforce community agency and equitable knowledge generation, potentially informing broader research agendas across the Global South. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08294v1-abstract-full').style.display = 'none'; document.getElementById('2411.08294v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 1 figure</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07650">arXiv:2411.07650</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07650">pdf</a>, <a href="https://arxiv.org/format/2411.07650">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Understanding Audiovisual Deepfake Detection: Techniques, Challenges, Human Factors and Perceptual Insights </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hashmi%2C+A">Ammarah Hashmi</a>, <a href="/search/cs?searchtype=author&amp;query=Shahzad%2C+S+A">Sahibzada Adil Shahzad</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+C">Chia-Wen Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Tsao%2C+Y">Yu Tsao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hsin-Min Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07650v1-abstract-short" style="display: inline;"> Deep Learning has been successfully applied in diverse fields, and its impact on deepfake detection is no exception. Deepfakes are fake yet realistic synthetic content that can be used deceitfully for political impersonation, phishing, slandering, or spreading misinformation. Despite extensive research on unimodal deepfake detection, identifying complex deepfakes through joint analysis of audio an&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07650v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07650v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07650v1-abstract-full" style="display: none;"> Deep Learning has been successfully applied in diverse fields, and its impact on deepfake detection is no exception. Deepfakes are fake yet realistic synthetic content that can be used deceitfully for political impersonation, phishing, slandering, or spreading misinformation. Despite extensive research on unimodal deepfake detection, identifying complex deepfakes through joint analysis of audio and visual streams remains relatively unexplored. To fill this gap, this survey first provides an overview of audiovisual deepfake generation techniques, applications, and their consequences, and then provides a comprehensive review of state-of-the-art methods that combine audio and visual modalities to enhance detection accuracy, summarizing and critically analyzing their strengths and limitations. Furthermore, we discuss existing open source datasets for a deeper understanding, which can contribute to the research community and provide necessary information to beginners who want to analyze deep learning-based audiovisual methods for video forensics. By bridging the gap between unimodal and multimodal approaches, this paper aims to improve the effectiveness of deepfake detection strategies and guide future research in cybersecurity and media integrity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07650v1-abstract-full').style.display = 'none'; document.getElementById('2411.07650v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07573">arXiv:2411.07573</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07573">pdf</a>, <a href="https://arxiv.org/format/2411.07573">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Robotic Control Optimization Through Kernel Selection in Safe Bayesian Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+L">Lihao Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hongxuan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xiaocong Li</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+J">Jun Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Vadakkepat%2C+P">Prahlad Vadakkepat</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07573v1-abstract-short" style="display: inline;"> Control system optimization has long been a fundamental challenge in robotics. While recent advancements have led to the development of control algorithms that leverage learning-based approaches, such as SafeOpt, to optimize single feedback controllers, scaling these methods to high-dimensional complex systems with multiple controllers remains an open problem. In this paper, we propose a novel lea&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07573v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07573v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07573v1-abstract-full" style="display: none;"> Control system optimization has long been a fundamental challenge in robotics. While recent advancements have led to the development of control algorithms that leverage learning-based approaches, such as SafeOpt, to optimize single feedback controllers, scaling these methods to high-dimensional complex systems with multiple controllers remains an open problem. In this paper, we propose a novel learning-based control optimization method, which enhances the additive Gaussian process-based Safe Bayesian Optimization algorithm to efficiently tackle high-dimensional problems through kernel selection. We use PID controller optimization in drones as a representative example and test the method on Safe Control Gym, a benchmark designed for evaluating safe control techniques. We show that the proposed method provides a more efficient and optimal solution for high-dimensional control optimization problems, demonstrating significant improvements over existing techniques. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07573v1-abstract-full').style.display = 'none'; document.getElementById('2411.07573v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by 2024 IEEE International Conference on Robotics and Biomimetics (ROBIO)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07518">arXiv:2411.07518</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07518">pdf</a>, <a href="https://arxiv.org/format/2411.07518">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> LLM App Squatting and Cloning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Y">Yinglin Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+X">Xinyi Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yanjie Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+K">Kai Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoyu Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07518v1-abstract-short" style="display: inline;"> Impersonation tactics, such as app squatting and app cloning, have posed longstanding challenges in mobile app stores, where malicious actors exploit the names and reputations of popular apps to deceive users. With the rapid growth of Large Language Model (LLM) stores like GPT Store and FlowGPT, these issues have similarly surfaced, threatening the integrity of the LLM app ecosystem. In this study&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07518v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07518v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07518v1-abstract-full" style="display: none;"> Impersonation tactics, such as app squatting and app cloning, have posed longstanding challenges in mobile app stores, where malicious actors exploit the names and reputations of popular apps to deceive users. With the rapid growth of Large Language Model (LLM) stores like GPT Store and FlowGPT, these issues have similarly surfaced, threatening the integrity of the LLM app ecosystem. In this study, we present the first large-scale analysis of LLM app squatting and cloning using our custom-built tool, LLMappCrazy. LLMappCrazy covers 14 squatting generation techniques and integrates Levenshtein distance and BERT-based semantic analysis to detect cloning by analyzing app functional similarities. Using this tool, we generated variations of the top 1000 app names and found over 5,000 squatting apps in the dataset. Additionally, we observed 3,509 squatting apps and 9,575 cloning cases across six major platforms. After sampling, we find that 18.7% of the squatting apps and 4.9% of the cloning apps exhibited malicious behavior, including phishing, malware distribution, fake content dissemination, and aggressive ad injection. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07518v1-abstract-full').style.display = 'none'; document.getElementById('2411.07518v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07392">arXiv:2411.07392</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07392">pdf</a>, <a href="https://arxiv.org/format/2411.07392">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Feature-Space Semantic Invariance: Enhanced OOD Detection for Open-Set Domain Generalization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoliang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chen Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+F">Feng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07392v1-abstract-short" style="display: inline;"> Open-set domain generalization addresses a real-world challenge: training a model to generalize across unseen domains (domain generalization) while also detecting samples from unknown classes not encountered during training (open-set recognition). However, most existing approaches tackle these issues separately, limiting their practical applicability. To overcome this limitation, we propose a unif&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07392v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07392v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07392v1-abstract-full" style="display: none;"> Open-set domain generalization addresses a real-world challenge: training a model to generalize across unseen domains (domain generalization) while also detecting samples from unknown classes not encountered during training (open-set recognition). However, most existing approaches tackle these issues separately, limiting their practical applicability. To overcome this limitation, we propose a unified framework for open-set domain generalization by introducing Feature-space Semantic Invariance (FSI). FSI maintains semantic consistency across different domains within the feature space, enabling more accurate detection of OOD instances in unseen domains. Additionally, we adopt a generative model to produce synthetic data with novel domain styles or class labels, enhancing model robustness. Initial experiments show that our method improves AUROC by 9.1% to 18.9% on ColoredMNIST, while also significantly increasing in-distribution classification accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07392v1-abstract-full').style.display = 'none'; document.getElementById('2411.07392v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IEEE BigData 2024, Ph.D. Forum</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07111">arXiv:2411.07111</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07111">pdf</a>, <a href="https://arxiv.org/format/2411.07111">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Building a Taiwanese Mandarin Spoken Language Model: A First Attempt </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+C">Chih-Kai Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+Y">Yu-Kuan Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+C">Chen-An Li</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Y">Yi-Cheng Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Y">Yu-Xiang Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+W">Wei-Chih Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chung%2C+H+L">Ho Lam Chung</a>, <a href="/search/cs?searchtype=author&amp;query=Kuan%2C+C">Chun-Yi Kuan</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+W">Wei-Ping Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+K">Ke-Han Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+T">Tzu-Quan Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hsiu-Hsuan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+E">En-Pei Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Hsu%2C+C">Chan-Jan Hsu</a>, <a href="/search/cs?searchtype=author&amp;query=Tseng%2C+L">Liang-Hsuan Tseng</a>, <a href="/search/cs?searchtype=author&amp;query=Chiu%2C+I">I-Hsiang Chiu</a>, <a href="/search/cs?searchtype=author&amp;query=Sanga%2C+U">Ulin Sanga</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xuanjun Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Hsu%2C+P">Po-chun Hsu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+S">Shu-wen Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+H">Hung-yi Lee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07111v1-abstract-short" style="display: inline;"> This technical report presents our initial attempt to build a spoken large language model (LLM) for Taiwanese Mandarin, specifically tailored to enable real-time, speech-to-speech interaction in multi-turn conversations. Our end-to-end model incorporates a decoder-only transformer architecture and aims to achieve seamless interaction while preserving the conversational flow, including full-duplex&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07111v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07111v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07111v1-abstract-full" style="display: none;"> This technical report presents our initial attempt to build a spoken large language model (LLM) for Taiwanese Mandarin, specifically tailored to enable real-time, speech-to-speech interaction in multi-turn conversations. Our end-to-end model incorporates a decoder-only transformer architecture and aims to achieve seamless interaction while preserving the conversational flow, including full-duplex capabilities allowing simultaneous speaking and listening. The paper also details the training process, including data preparation with synthesized dialogues and adjustments for real-time interaction. We also developed a platform to evaluate conversational fluency and response coherence in multi-turn dialogues. We hope the release of the report can contribute to the future development of spoken LLMs in Taiwanese Mandarin. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07111v1-abstract-full').style.display = 'none'; document.getElementById('2411.07111v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Work in progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06991">arXiv:2411.06991</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06991">pdf</a>, <a href="https://arxiv.org/format/2411.06991">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SIESEF-FusionNet: Spatial Inter-correlation Enhancement and Spatially-Embedded Feature Fusion Network for LiDAR Point Cloud Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jiale Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+F">Fei Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Mao%2C+J">Jianliang Mao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoping Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+C">Chuanlin Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06991v1-abstract-short" style="display: inline;"> The ambiguity at the boundaries of different semantic classes in point cloud semantic segmentation often leads to incorrect decisions in intelligent perception systems, such as autonomous driving. Hence, accurate delineation of the boundaries is crucial for improving safety in autonomous driving. A novel spatial inter-correlation enhancement and spatially-embedded feature fusion network (SIESEF-Fu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06991v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06991v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06991v1-abstract-full" style="display: none;"> The ambiguity at the boundaries of different semantic classes in point cloud semantic segmentation often leads to incorrect decisions in intelligent perception systems, such as autonomous driving. Hence, accurate delineation of the boundaries is crucial for improving safety in autonomous driving. A novel spatial inter-correlation enhancement and spatially-embedded feature fusion network (SIESEF-FusionNet) is proposed in this paper, enhancing spatial inter-correlation by combining inverse distance weighting and angular compensation to extract more beneficial spatial information without causing redundancy. Meanwhile, a new spatial adaptive pooling module is also designed, embedding enhanced spatial information into semantic features for strengthening the context-awareness of semantic features. Experimental results demonstrate that 83.7% mIoU and 97.8% OA are achieved by SIESEF-FusionNet on the Toronto3D dataset, with performance superior to other baseline methods. A value of 61.1% mIoU is reached on the semanticKITTI dataset, where a marked improvement in segmentation performance is observed. In addition, the effectiveness and plug-and-play capability of the proposed modules are further verified through ablation studies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06991v1-abstract-full').style.display = 'none'; document.getElementById('2411.06991v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06782">arXiv:2411.06782</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06782">pdf</a>, <a href="https://arxiv.org/format/2411.06782">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> QuadWBG: Generalizable Quadrupedal Whole-Body Grasping </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jilong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Rajabov%2C+J">Javokhirbek Rajabov</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+C">Chaoyi Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+Y">Yiming Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">He Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06782v1-abstract-short" style="display: inline;"> Legged robots with advanced manipulation capabilities have the potential to significantly improve household duties and urban maintenance. Despite considerable progress in developing robust locomotion and precise manipulation methods, seamlessly integrating these into cohesive whole-body control for real-world applications remains challenging. In this paper, we present a modular framework for robus&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06782v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06782v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06782v1-abstract-full" style="display: none;"> Legged robots with advanced manipulation capabilities have the potential to significantly improve household duties and urban maintenance. Despite considerable progress in developing robust locomotion and precise manipulation methods, seamlessly integrating these into cohesive whole-body control for real-world applications remains challenging. In this paper, we present a modular framework for robust and generalizable whole-body loco-manipulation controller based on a single arm-mounted camera. By using reinforcement learning (RL), we enable a robust low-level policy for command execution over 5 dimensions (5D) and a grasp-aware high-level policy guided by a novel metric, Generalized Oriented Reachability Map (GORM). The proposed system achieves state-of-the-art one-time grasping accuracy of 89% in the real world, including challenging tasks such as grasping transparent objects. Through extensive simulations and real-world experiments, we demonstrate that our system can effectively manage a large workspace, from floor level to above body height, and perform diverse whole-body loco-manipulation tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06782v1-abstract-full').style.display = 'none'; document.getElementById('2411.06782v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06741">arXiv:2411.06741</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06741">pdf</a>, <a href="https://arxiv.org/format/2411.06741">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Applications">stat.AP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Methane projections from Canada&#39;s oil sands tailings using scientific deep learning reveal significant underestimation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Saha%2C+E">Esha Saha</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+O">Oscar Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chakraborty%2C+A+K">Amit K. Chakraborty</a>, <a href="/search/cs?searchtype=author&amp;query=Garcia%2C+P+V">Pablo Venegas Garcia</a>, <a href="/search/cs?searchtype=author&amp;query=Milne%2C+R">Russell Milne</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06741v1-abstract-short" style="display: inline;"> Bitumen extraction for the production of synthetic crude oil in Canada&#39;s Athabasca Oil Sands industry has recently come under spotlight for being a significant source of greenhouse gas emission. A major cause of concern is methane, a greenhouse gas produced by the anaerobic biodegradation of hydrocarbons in oil sands residues, or tailings, stored in settle basins commonly known as oil sands tailin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06741v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06741v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06741v1-abstract-full" style="display: none;"> Bitumen extraction for the production of synthetic crude oil in Canada&#39;s Athabasca Oil Sands industry has recently come under spotlight for being a significant source of greenhouse gas emission. A major cause of concern is methane, a greenhouse gas produced by the anaerobic biodegradation of hydrocarbons in oil sands residues, or tailings, stored in settle basins commonly known as oil sands tailing ponds. In order to determine the methane emitting potential of these tailing ponds and have future methane projections, we use real-time weather data, mechanistic models developed from laboratory controlled experiments, and industrial reports to train a physics constrained machine learning model. Our trained model can successfully identify the directions of active ponds and estimate their emission levels, which are generally hard to obtain due to data sampling restrictions. We found that each active oil sands tailing pond could emit between 950 to 1500 tonnes of methane per year, whose environmental impact is equivalent to carbon dioxide emissions from at least 6000 gasoline powered vehicles. Although abandoned ponds are often presumed to have insignificant emissions, our findings indicate that these ponds could become active over time and potentially emit up to 1000 tonnes of methane each year. Taking an average over all datasets that was used in model training, we estimate that emissions around major oil sands regions would need to be reduced by approximately 12% over a year, to reduce the average methane concentrations to 2005 levels. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06741v1-abstract-full').style.display = 'none'; document.getElementById('2411.06741v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages, 8 figures, 2 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06558">arXiv:2411.06558</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06558">pdf</a>, <a href="https://arxiv.org/format/2411.06558">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Region-Aware Text-to-Image Generation via Hard Binding and Soft Refinement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhennan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yajie Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haofan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhibo Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhengkai Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+J">Jun Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Q">Qian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Jian Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Tai%2C+Y">Ying Tai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06558v2-abstract-short" style="display: inline;"> Regional prompting, or compositional generation, which enables fine-grained spatial control, has gained increasing attention for its practicality in real-world applications. However, previous methods either introduce additional trainable modules, thus only applicable to specific models, or manipulate on score maps within cross-attention layers using attention masks, resulting in limited control st&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06558v2-abstract-full').style.display = 'inline'; document.getElementById('2411.06558v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06558v2-abstract-full" style="display: none;"> Regional prompting, or compositional generation, which enables fine-grained spatial control, has gained increasing attention for its practicality in real-world applications. However, previous methods either introduce additional trainable modules, thus only applicable to specific models, or manipulate on score maps within cross-attention layers using attention masks, resulting in limited control strength when the number of regions increases. To handle these limitations, we present RAG, a Regional-Aware text-to-image Generation method conditioned on regional descriptions for precise layout composition. RAG decouple the multi-region generation into two sub-tasks, the construction of individual region (Regional Hard Binding) that ensures the regional prompt is properly executed, and the overall detail refinement (Regional Soft Refinement) over regions that dismiss the visual boundaries and enhance adjacent interactions. Furthermore, RAG novelly makes repainting feasible, where users can modify specific unsatisfied regions in the last generation while keeping all other regions unchanged, without relying on additional inpainting models. Our approach is tuning-free and applicable to other frameworks as an enhancement to the prompt following property. Quantitative and qualitative experiments demonstrate that RAG achieves superior performance over attribute binding and object relationship than previous tuning-free methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06558v2-abstract-full').style.display = 'none'; document.getElementById('2411.06558v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Code is available at https://github.com/NJU-PCALab/RAG-Diffusion</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Wang%2C+H&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10