<!-- Search | arXiv e-print repository -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 10,719 results for author: <span class="mathjax">Chen, X</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/" aria-role="search"> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Chen, X"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label 
class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Chen%2C+X&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Chen, X"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option 
value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=100" class="pagination-link " aria-label="Page 3">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=150" class="pagination-link " aria-label="Page 4">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=200" class="pagination-link " aria-label="Page 5">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li 
class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.10248">arXiv:2502.10248</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.10248">pdf</a>, <a href="https://arxiv.org/format/2502.10248">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Step-Video-T2V Technical Report: The Practice, Challenges, and Future of Video Foundation Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ma%2C+G">Guoqing Ma</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+H">Haoyang Huang</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+K">Kun Yan</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+L">Liangyu Chen</a>, <a href="/search/?searchtype=author&amp;query=Duan%2C+N">Nan Duan</a>, <a href="/search/?searchtype=author&amp;query=Yin%2C+S">Shengming Yin</a>, <a href="/search/?searchtype=author&amp;query=Wan%2C+C">Changyi Wan</a>, <a href="/search/?searchtype=author&amp;query=Ming%2C+R">Ranchen Ming</a>, <a href="/search/?searchtype=author&amp;query=Song%2C+X">Xiaoniu Song</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xing Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yu Zhou</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+D">Deshan Sun</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+D">Deyu Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+J">Jian Zhou</a>, <a href="/search/?searchtype=author&amp;query=Tan%2C+K">Kaijun Tan</a>, <a href="/search/?searchtype=author&amp;query=An%2C+K">Kang An</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+M">Mei 
Chen</a>, <a href="/search/?searchtype=author&amp;query=Ji%2C+W">Wei Ji</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Q">Qiling Wu</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+W">Wen Sun</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+X">Xin Han</a>, <a href="/search/?searchtype=author&amp;query=Wei%2C+Y">Yanan Wei</a>, <a href="/search/?searchtype=author&amp;query=Ge%2C+Z">Zheng Ge</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+A">Aojie Li</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+B">Bin Wang</a> , et al. (90 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.10248v1-abstract-short" style="display: inline;"> We present Step-Video-T2V, a state-of-the-art text-to-video pre-trained model with 30B parameters and the ability to generate videos up to 204 frames in length. A deep compression Variational Autoencoder, Video-VAE, is designed for video generation tasks, achieving 16x16 spatial and 8x temporal compression ratios, while maintaining exceptional video reconstruction quality. User prompts are encoded&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.10248v1-abstract-full').style.display = 'inline'; document.getElementById('2502.10248v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.10248v1-abstract-full" style="display: none;"> We present Step-Video-T2V, a state-of-the-art text-to-video pre-trained model with 30B parameters and the ability to generate videos up to 204 frames in length. A deep compression Variational Autoencoder, Video-VAE, is designed for video generation tasks, achieving 16x16 spatial and 8x temporal compression ratios, while maintaining exceptional video reconstruction quality. 
User prompts are encoded using two bilingual text encoders to handle both English and Chinese. A DiT with 3D full attention is trained using Flow Matching and is employed to denoise input noise into latent frames. A video-based DPO approach, Video-DPO, is applied to reduce artifacts and improve the visual quality of the generated videos. We also detail our training strategies and share key observations and insights. Step-Video-T2V&#39;s performance is evaluated on a novel video generation benchmark, Step-Video-T2V-Eval, demonstrating its state-of-the-art text-to-video quality when compared with both open-source and commercial engines. Additionally, we discuss the limitations of current diffusion-based model paradigm and outline future directions for video foundation models. We make both Step-Video-T2V and Step-Video-T2V-Eval available at https://github.com/stepfun-ai/Step-Video-T2V. The online version can be accessed from https://yuewen.cn/videos as well. Our goal is to accelerate the innovation of video foundation models and empower video content creators. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.10248v1-abstract-full').style.display = 'none'; document.getElementById('2502.10248v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">35 pages, 14 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.10203">arXiv:2502.10203</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.10203">pdf</a>, <a href="https://arxiv.org/format/2502.10203">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> AI-in-the-Loop Sensing and Communication Joint Design for Edge Intelligence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Cai%2C+Z">Zhijie Cai</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+X">Xiaowen Cao</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xu Chen</a>, <a href="/search/?searchtype=author&amp;query=Cui%2C+Y">Yuanhao Cui</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+G">Guangxu Zhu</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+K">Kaibin Huang</a>, <a href="/search/?searchtype=author&amp;query=Cui%2C+S">Shuguang Cui</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.10203v1-abstract-short" style="display: inline;"> Recent breakthroughs in artificial intelligence (AI), wireless communications, and sensing technologies have accelerated the evolution of edge intelligence. However, conventional systems still grapple with issues such as low communication efficiency, redundant data acquisition, and poor model generalization. 
To overcome these challenges, we propose an innovative framework that enhances edge intell&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.10203v1-abstract-full').style.display = 'inline'; document.getElementById('2502.10203v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.10203v1-abstract-full" style="display: none;"> Recent breakthroughs in artificial intelligence (AI), wireless communications, and sensing technologies have accelerated the evolution of edge intelligence. However, conventional systems still grapple with issues such as low communication efficiency, redundant data acquisition, and poor model generalization. To overcome these challenges, we propose an innovative framework that enhances edge intelligence through AI-in-the-loop joint sensing and communication (JSAC). This framework features an AI-driven closed-loop control architecture that jointly optimizes system resources, thereby delivering superior system-level performance. A key contribution of our work is establishing an explicit relationship between validation loss and the system&#39;s tunable parameters. This insight enables dynamic reduction of the generalization error through AI-driven closed-loop control. Specifically, for sensing control, we introduce an adaptive data collection strategy based on gradient importance sampling, allowing edge devices to autonomously decide when to terminate data acquisition and how to allocate sample weights based on real-time model feedback. For communication control, drawing inspiration from stochastic gradient Langevin dynamics (SGLD), our joint optimization of transmission power and batch size converts channel and data noise into gradient perturbations that help mitigate overfitting. 
Experimental evaluations demonstrate that our framework reduces communication energy consumption by up to 77 percent and sensing costs measured by the number of collected samples by up to 52 percent while significantly improving model generalization -- with up to 58 percent reductions of the final validation loss. It validates that the proposed scheme can harvest the mutual benefit of AI and JSAC systems by incorporating the model itself into the control loop of the system. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.10203v1-abstract-full').style.display = 'none'; document.getElementById('2502.10203v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.10180">arXiv:2502.10180</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.10180">pdf</a>, <a href="https://arxiv.org/format/2502.10180">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Safe platooning control of connected and autonomous vehicles on curved multi-lane roads </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiao Chen</a>, <a href="/search/?searchtype=author&amp;query=Tang%2C+Z">Zhiqi Tang</a>, <a href="/search/?searchtype=author&amp;query=Johansson%2C+K+H">Karl Henrik Johansson</a>, <a href="/search/?searchtype=author&amp;query=M%C3%A5rtensson%2C+J">Jonas Mårtensson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.10180v1-abstract-short" style="display: inline;"> This paper investigates the safe platoon formation tracking and merging control problem of connected and automated vehicles (CAVs) on curved multi-lane roads. The first novelty is the separation of the control designs into two distinct parts: a lateral control law that ensures a geometrical convergence towards the reference path regardless of the translational velocity, and a longitudinal control&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.10180v1-abstract-full').style.display = 'inline'; document.getElementById('2502.10180v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.10180v1-abstract-full" style="display: none;"> This paper investigates the safe platoon formation tracking and merging control problem of connected and automated vehicles (CAVs) on curved multi-lane roads. The first novelty is the separation of the control designs into two distinct parts: a lateral control law that ensures a geometrical convergence towards the reference path regardless of the translational velocity, and a longitudinal control design for each vehicle to achieve the desired relative arc length and velocity with respect to its neighboring vehicle. The second novelty is exploiting the constructive barrier feedback as an additive term to the nominal tracking control, ensuring both lateral and longitudinal collision avoidance. This constructive barrier feedback acts as a dissipative term, slowing down the relative velocity toward obstacles without affecting the nominal controller&#39;s performance. Consequently, our proposed control method enables safe platoon formation of vehicles on curved multi-lane roads, with theoretical guarantees for safety invariance and stability analysis. 
Simulation and experimental results on connected vehicles are provided to further validate the effectiveness of the proposed method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.10180v1-abstract-full').style.display = 'none'; document.getElementById('2502.10180v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.09940">arXiv:2502.09940</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.09940">pdf</a>, <a href="https://arxiv.org/format/2502.09940">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> A Preliminary Exploration with GPT-4o Voice Mode </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Lin%2C+Y">Yu-Xiang Lin</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+C">Chih-Kai Yang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+W">Wei-Chih Chen</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+C">Chen-An Li</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+C">Chien-yu Huang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xuanjun Chen</a>, <a href="/search/?searchtype=author&amp;query=Lee%2C+H">Hung-yi Lee</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.09940v1-abstract-short" style="display: inline;"> With the rise of multimodal large language models, GPT-4o stands out as a pioneering model, driving us to evaluate its capabilities. This report assesses GPT-4o across various tasks to analyze its audio processing and reasoning abilities. We find that GPT-4o exhibits strong knowledge in audio, speech, and music understanding, performing well in tasks like intent classification, spoken command clas&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09940v1-abstract-full').style.display = 'inline'; document.getElementById('2502.09940v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.09940v1-abstract-full" style="display: none;"> With the rise of multimodal large language models, GPT-4o stands out as a pioneering model, driving us to evaluate its capabilities. This report assesses GPT-4o across various tasks to analyze its audio processing and reasoning abilities. We find that GPT-4o exhibits strong knowledge in audio, speech, and music understanding, performing well in tasks like intent classification, spoken command classification, semantic and grammatical reasoning., multilingual speech recognition, and singing analysis. It also shows greater robustness against hallucinations than other large audio-language models (LALMs). However, it struggles with tasks such as audio duration prediction and instrument classification. Additionally, GPT-4o&#39;s safety mechanisms cause it to decline tasks like speaker identification, age classification, MOS prediction, and audio deepfake detection. Notably, the model exhibits a significantly different refusal rate when responding to speaker verification tasks on different datasets. 
This is likely due to variations in the accompanying instructions or the quality of the input audio, suggesting the sensitivity of its built-in safeguards. Finally, we acknowledge that model performance varies with evaluation protocols. This report only serves as a preliminary exploration of the current state of LALMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09940v1-abstract-full').style.display = 'none'; document.getElementById('2502.09940v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Work in progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.09621">arXiv:2502.09621</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.09621">pdf</a>, <a href="https://arxiv.org/format/2502.09621">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> MME-CoT: Benchmarking Chain-of-Thought in Large Multimodal Models for Reasoning Quality, Robustness, and Efficiency </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Jiang%2C+D">Dongzhi Jiang</a>, <a 
href="/search/?searchtype=author&amp;query=Zhang%2C+R">Renrui Zhang</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+Z">Ziyu Guo</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yanwei Li</a>, <a href="/search/?searchtype=author&amp;query=Qi%2C+Y">Yu Qi</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xinyan Chen</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+L">Liuhui Wang</a>, <a href="/search/?searchtype=author&amp;query=Jin%2C+J">Jianhan Jin</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+C">Claire Guo</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+S">Shen Yan</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+B">Bo Zhang</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+C">Chaoyou Fu</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+P">Peng Gao</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+H">Hongsheng Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.09621v1-abstract-short" style="display: inline;"> Answering questions with Chain-of-Thought (CoT) has significantly enhanced the reasoning capabilities of Large Language Models (LLMs), yet its impact on Large Multimodal Models (LMMs) still lacks a systematic assessment and in-depth investigation. 
In this paper, we introduce MME-CoT, a specialized benchmark evaluating the CoT reasoning performance of LMMs, spanning six domains: math, science, OCR,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09621v1-abstract-full').style.display = 'inline'; document.getElementById('2502.09621v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.09621v1-abstract-full" style="display: none;"> Answering questions with Chain-of-Thought (CoT) has significantly enhanced the reasoning capabilities of Large Language Models (LLMs), yet its impact on Large Multimodal Models (LMMs) still lacks a systematic assessment and in-depth investigation. In this paper, we introduce MME-CoT, a specialized benchmark evaluating the CoT reasoning performance of LMMs, spanning six domains: math, science, OCR, logic, space-time, and general scenes. As the first comprehensive study in this area, we propose a thorough evaluation suite incorporating three novel metrics that assess the reasoning quality, robustness, and efficiency at a fine-grained level. Leveraging curated high-quality data and a unique evaluation strategy, we conduct an in-depth analysis of state-of-the-art LMMs, uncovering several key insights: 1) Models with reflection mechanism demonstrate a superior CoT quality, with Kimi k1.5 outperforming GPT-4o and demonstrating the highest quality results; 2) CoT prompting often degrades LMM performance on perception-heavy tasks, suggesting a potentially harmful overthinking behavior; and 3) Although the CoT quality is high, LMMs with reflection exhibit significant inefficiency in both normal response and self-correction phases. We hope MME-CoT serves as a foundation for advancing multimodal reasoning in LMMs. 
Project Page: https://mmecot.github.io/ <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09621v1-abstract-full').style.display = 'none'; document.getElementById('2502.09621v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project Page: https://mmecot.github.io/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.09449">arXiv:2502.09449</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.09449">pdf</a>, <a href="https://arxiv.org/format/2502.09449">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> Spiking Neural Networks for Temporal Processing: Status Quo and Future Prospects </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ma%2C+C">Chenxiang Ma</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xinyi Chen</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yanchen Li</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Q">Qu Yang</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Y">Yujie Wu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+G">Guoqi Li</a>, <a href="/search/?searchtype=author&amp;query=Pan%2C+G">Gang Pan</a>, <a href="/search/?searchtype=author&amp;query=Tang%2C+H">Huajin Tang</a>, <a href="/search/?searchtype=author&amp;query=Tan%2C+K+C">Kay Chen 
Tan</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+J">Jibin Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.09449v1-abstract-short" style="display: inline;"> Temporal processing is fundamental for both biological and artificial intelligence systems, as it enables the comprehension of dynamic environments and facilitates timely responses. Spiking Neural Networks (SNNs) excel in handling such data with high efficiency, owing to their rich neuronal dynamics and sparse activity patterns. Given the recent surge in the development of SNNs, there is an urgent&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09449v1-abstract-full').style.display = 'inline'; document.getElementById('2502.09449v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.09449v1-abstract-full" style="display: none;"> Temporal processing is fundamental for both biological and artificial intelligence systems, as it enables the comprehension of dynamic environments and facilitates timely responses. Spiking Neural Networks (SNNs) excel in handling such data with high efficiency, owing to their rich neuronal dynamics and sparse activity patterns. Given the recent surge in the development of SNNs, there is an urgent need for a comprehensive evaluation of their temporal processing capabilities. In this paper, we first conduct an in-depth assessment of commonly used neuromorphic benchmarks, revealing critical limitations in their ability to evaluate the temporal processing capabilities of SNNs. To bridge this gap, we further introduce a benchmark suite consisting of three temporal processing tasks characterized by rich temporal dynamics across multiple timescales. 
Utilizing this benchmark suite, we perform a thorough evaluation of recently introduced SNN approaches to elucidate the current status of SNNs in temporal processing. Our findings indicate significant advancements in recently developed spiking neuron models and neural architectures regarding their temporal processing capabilities, while also highlighting a performance gap in handling long-range dependencies when compared to state-of-the-art non-spiking models. Finally, we discuss the key challenges and outline potential avenues for future research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09449v1-abstract-full').style.display = 'none'; document.getElementById('2502.09449v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.09183">arXiv:2502.09183</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.09183">pdf</a>, <a href="https://arxiv.org/format/2502.09183">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> RefineCoder: Iterative Improving of Large Language Models via Adaptive Critique Refinement for Code Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+C">Changzhi Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+X">Xinyu Zhang</a>, <a href="/search/?searchtype=author&amp;query=Song%2C+D">Dandan Song</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiancai Chen</a>, <a href="/search/?searchtype=author&amp;query=Gu%2C+W">Wanli Gu</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+H">Huipeng Ma</a>, <a href="/search/?searchtype=author&amp;query=Tian%2C+Y">Yuhang Tian</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+M">Mengdi Zhang</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+L">Linmei Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.09183v1-abstract-short" style="display: inline;"> Code generation has attracted increasing attention with the rise of Large Language Models (LLMs). Many studies have developed powerful code LLMs by synthesizing code-related instruction data and applying supervised fine-tuning. 
However, these methods are limited by teacher model distillation and ignore the potential of iterative refinement by self-generated code. In this paper, we propose Adaptive&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09183v1-abstract-full').style.display = 'inline'; document.getElementById('2502.09183v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.09183v1-abstract-full" style="display: none;"> Code generation has attracted increasing attention with the rise of Large Language Models (LLMs). Many studies have developed powerful code LLMs by synthesizing code-related instruction data and applying supervised fine-tuning. However, these methods are limited by teacher model distillation and ignore the potential of iterative refinement by self-generated code. In this paper, we propose Adaptive Critique Refinement (ACR), which enables the model to refine itself by self-generated code and external critique, rather than directly imitating the code responses of the teacher model. Concretely, ACR includes a composite scoring system with LLM-as-a-Judge to evaluate the quality of code responses and a selective critique strategy with LLM-as-a-Critic to critique self-generated low-quality code responses. We develop the RefineCoder series by iteratively applying ACR, achieving continuous performance improvement on multiple code generation benchmarks. Compared to the baselines of the same size, our proposed RefineCoder series can achieve comparable or even superior performance using less data. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09183v1-abstract-full').style.display = 'none'; document.getElementById('2502.09183v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">work in process</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08929">arXiv:2502.08929</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08929">pdf</a>, <a href="https://arxiv.org/ps/2502.08929">ps</a>, <a href="https://arxiv.org/format/2502.08929">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Precise Measurement of the $χ_{c0}$ Resonance Parameters and Branching Fractions of $χ_{c0,c2}\toπ^+π^-/K^+K^-$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. 
Amoroso</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a>, <a href="/search/?searchtype=author&amp;query=Brueggemann%2C+A">A. Brueggemann</a> , et al. (648 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08929v1-abstract-short" style="display: inline;"> By analyzing a $ψ(3686)$ data sample containing $(107.7\pm0.6)\times10^{6}$ events taken with the BESIII detector at the BEPCII storage ring in 2009, the $χ_{c0}$ resonance parameters are precisely measured using $χ_{c0,c2} \to π^+π^-/K^+K^-$ events. 
The mass of $χ_{c0}$ is determined to be $M(χ_{c0})=(3415.67\pm0.07\pm0.06\pm0.07$)~MeV/$c^2$, and its full width is&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08929v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08929v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08929v1-abstract-full" style="display: none;"> By analyzing a $ψ(3686)$ data sample containing $(107.7\pm0.6)\times10^{6}$ events taken with the BESIII detector at the BEPCII storage ring in 2009, the $χ_{c0}$ resonance parameters are precisely measured using $χ_{c0,c2} \to π^+π^-/K^+K^-$ events. The mass of $χ_{c0}$ is determined to be $M(χ_{c0})=(3415.67\pm0.07\pm0.06\pm0.07$)~MeV/$c^2$, and its full width is $Γ(χ_{c0})=(12.44\pm0.12\pm0.12)~{\rm MeV}$, where the first uncertainty is statistical, the second systematic, and the third for mass comes from $χ_{c2}$ mass uncertainty. These measurements improve the precision of $χ_{c0}$ mass by a factor of four and width by one order of magnitude over the previous individual measurements, and significantly boost our knowledge about the charmonium spectrum. Together with additional $(345.4\pm2.6)\times10^{6}$ $ψ(3686)$ data events taken in 2012, the decay branching fractions of $χ_{c0,c2}\toπ^+π^-/K^+K^-$ are measured as well, with precision improved by a factor of three compared to previous measurements. These $χ_{c0}$ decay branching fractions provide important inputs for the study of glueballs. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08929v1-abstract-full').style.display = 'none'; document.getElementById('2502.08929v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 1 figure</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08913">arXiv:2502.08913</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08913">pdf</a>, <a href="https://arxiv.org/format/2502.08913">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cosmology and Nongalactic Astrophysics">astro-ph.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> </div> </div> <p class="title is-5 mathjax"> Searching for axion dark matter gegenschein of the Vela supernova remnant with FAST </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Yang%2C+W">Wenxiu Yang</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+Y">Yitian Sun</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yougang Wang</a>, <a href="/search/?searchtype=author&amp;query=Schutz%2C+K">Katelin Schutz</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yichao Li</a>, <a href="/search/?searchtype=author&amp;query=Leung%2C+C">Calvin Leung</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+W">Wenkai Hu</a>, <a 
href="/search/?searchtype=author&amp;query=Shu%2C+S">Shuanghao Shu</a>, <a href="/search/?searchtype=author&amp;query=Masui%2C+K">Kiyoshi Masui</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xuelei Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08913v1-abstract-short" style="display: inline;"> Axions are one of the leading dark matter candidates. If we are embedded in a Milky Way dark matter halo comprised of axions, their stimulated decay would enable us to observe a counterimage (``axion gegenschein&#34;) with a frequency equal to half the axion mass in the opposite direction of a bright radio source. This spectral line emission will be broadened to $Δν/ν\sim σ_d/c \sim 10^{-3}$ due to th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08913v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08913v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08913v1-abstract-full" style="display: none;"> Axions are one of the leading dark matter candidates. If we are embedded in a Milky Way dark matter halo comprised of axions, their stimulated decay would enable us to observe a counterimage (``axion gegenschein&#34;) with a frequency equal to half the axion mass in the opposite direction of a bright radio source. This spectral line emission will be broadened to $Δν/ν\sim σ_d/c \sim 10^{-3}$ due to the velocity dispersion of dark matter, $σ_d$. In this pilot study, we perform the first search for the expected axion gegenschein image of Vela supernova remnant (SNR) with 26.4 hours of effective ON-OFF data from the Five-hundred-meter Aperture Spherical radio Telescope (FAST) L-band (1.0 - 1.5~GHz) 19-beam receiver. 
Our null detection limits the axion-photon coupling strength to be $g_{aγγ} \lesssim 2 \times 10^{-10} \mathrm{GeV}^{-1}$ in the mass ranges of $8.7\,μ\mathrm{eV} \leq m_a \leq 9.44\,μ\mathrm{eV}$ and $10.85\,μ\mathrm{eV} \leq m_a \leq 12.01\,μ\mathrm{eV} $. These results provide a stronger constraint on $g_{aγγ}$ in this axion mass range than the current limits obtained by the direct search of axion decay signal from galaxy clusters which uses FAST observations, but is a factor of $\sim 3$ times weaker than the current CAST limit. Based on our observation strategy, data processing methods, and results, the expected sensitivity will reach $\sim 10^{-11}\mathrm{GeV}^{-1}$ with $\sim 2000$ hours of observation in the future. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08913v1-abstract-full').style.display = 'none'; document.getElementById('2502.08913v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 23 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08481">arXiv:2502.08481</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08481">pdf</a>, <a href="https://arxiv.org/format/2502.08481">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> </div> </div> <p class="title is-5 mathjax"> Reconstructing the Anisotropic Ultra-long Wavelength Spectra using a Single Antenna on Lunar-orbit </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ao%2C+Q">Qige Ao</a>, <a href="/search/?searchtype=author&amp;query=Deng%2C+F">Furen Deng</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+Y">Yidong Xu</a>, <a href="/search/?searchtype=author&amp;query=Yue%2C+B">Bin Yue</a>, <a href="/search/?searchtype=author&amp;query=Shan%2C+H">Huanyuan Shan</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xuelei Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08481v1-abstract-short" style="display: inline;"> The ultra-long wavelength sky ($ν\lesssim 30$ MHz) is still largely unexplored, as the electromagnetic wave is heavily absorbed and distorted by the ionosphere on Earth. The far-side of the Moon, either in lunar-orbit or on lunar-surface, is the ideal site for observations in this band, and the upcoming Moon-based interferometers will obtain multi-frequency high-resolution sky maps. 
Making use of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08481v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08481v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08481v1-abstract-full" style="display: none;"> The ultra-long wavelength sky ($ν\lesssim 30$ MHz) is still largely unexplored, as the electromagnetic wave is heavily absorbed and distorted by the ionosphere on Earth. The far-side of the Moon, either in lunar-orbit or on lunar-surface, is the ideal site for observations in this band, and the upcoming Moon-based interferometers will obtain multi-frequency high-resolution sky maps. Making use of the lunar occultation of the sky and the anisotropy of antenna primary beam response, we propose a novel method to reconstruct the ultra-long wavelength spectral shape in multiple directions in the sky using only one antenna on lunar orbit. We apply the method to one antenna on one of the nine daughter satellites of the proposed Discovering the Sky at Longest wavelength (DSL) project. Using simulated observation data between 1 - 30 MHz from one dipole antenna, we find that the spectra for different regions on the sky can be reconstructed very well and the free-free absorption feature in each region can be derived from the reconstructed spectra. This work demonstrates the feasibility to reconstruct the anisotropic ultra-long wavelength spectra with very limited instrumentation on a lunar-orbit, with mature technologies already in place. It extends the application of such kind of satellite in revealing the distribution of free electrons in the Galactic interstellar medium from the distribution of absorption features in the ultra-long wavelength sky. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08481v1-abstract-full').style.display = 'none'; document.getElementById('2502.08481v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 10 figures, 2 tables, submitted to ApJ</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08378">arXiv:2502.08378</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08378">pdf</a>, <a href="https://arxiv.org/format/2502.08378">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Learning Humanoid Standing-up Control across Diverse Postures </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+T">Tao Huang</a>, <a href="/search/?searchtype=author&amp;query=Ren%2C+J">Junli Ren</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+H">Huayi Wang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Z">Zirui Wang</a>, <a href="/search/?searchtype=author&amp;query=Ben%2C+Q">Qingwei Ben</a>, <a href="/search/?searchtype=author&amp;query=Wen%2C+M">Muning Wen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiao 
Chen</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jianan Li</a>, <a href="/search/?searchtype=author&amp;query=Pang%2C+J">Jiangmiao Pang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08378v1-abstract-short" style="display: inline;"> Standing-up control is crucial for humanoid robots, with the potential for integration into current locomotion and loco-manipulation systems, such as fall recovery. Existing approaches are either limited to simulations that overlook hardware constraints or rely on predefined ground-specific motion trajectories, failing to enable standing up across postures in real-world scenes. To bridge this gap,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08378v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08378v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08378v1-abstract-full" style="display: none;"> Standing-up control is crucial for humanoid robots, with the potential for integration into current locomotion and loco-manipulation systems, such as fall recovery. Existing approaches are either limited to simulations that overlook hardware constraints or rely on predefined ground-specific motion trajectories, failing to enable standing up across postures in real-world scenes. To bridge this gap, we present HoST (Humanoid Standing-up Control), a reinforcement learning framework that learns standing-up control from scratch, enabling robust sim-to-real transfer across diverse postures. HoST effectively learns posture-adaptive motions by leveraging a multi-critic architecture and curriculum-based training on diverse simulated terrains. 
To ensure successful real-world deployment, we constrain the motion with smoothness regularization and implicit motion speed bound to alleviate oscillatory and violent motions on physical hardware, respectively. After simulation-based training, the learned control policies are directly deployed on the Unitree G1 humanoid robot. Our experimental results demonstrate that the controllers achieve smooth, stable, and robust standing-up motions across a wide range of laboratory and outdoor environments. Videos are available at https://taohuang13.github.io/humanoid-standingup.github.io/. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08378v1-abstract-full').style.display = 'none'; document.getElementById('2502.08378v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Humanoid Standing-up Control, 12 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08221">arXiv:2502.08221</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08221">pdf</a>, <a href="https://arxiv.org/format/2502.08221">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Take What You Need: Flexible Multi-Task Semantic Communications with Channel Adaptation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiang Chen</a>, <a href="/search/?searchtype=author&amp;query=Gan%2C+S">Shuying Gan</a>, <a href="/search/?searchtype=author&amp;query=Feng%2C+C">Chenyuan Feng</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+X">Xijun Wang</a>, <a href="/search/?searchtype=author&amp;query=Quek%2C+T+Q+S">Tony Q. S. Quek</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08221v1-abstract-short" style="display: inline;"> The growing demand for efficient semantic communication systems capable of managing diverse tasks and adapting to fluctuating channel conditions has driven the development of robust, resource-efficient frameworks. 
This article introduces a novel channel-adaptive and multi-task-aware semantic communication framework based on a masked auto-encoder architecture. Our framework optimizes the transmissi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08221v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08221v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08221v1-abstract-full" style="display: none;"> The growing demand for efficient semantic communication systems capable of managing diverse tasks and adapting to fluctuating channel conditions has driven the development of robust, resource-efficient frameworks. This article introduces a novel channel-adaptive and multi-task-aware semantic communication framework based on a masked auto-encoder architecture. Our framework optimizes the transmission of meaningful information by incorporating a multi-task-aware scoring mechanism that identifies and prioritizes semantically significant data across multiple concurrent tasks. A channel-aware extractor is employed to dynamically select relevant information in response to real-time channel conditions. By jointly optimizing semantic relevance and transmission efficiency, the framework ensures minimal performance degradation under resource constraints. Experimental results demonstrate the superior performance of our framework compared to conventional methods in tasks such as image reconstruction and object detection. These results underscore the framework&#39;s adaptability to heterogeneous channel environments and its scalability for multi-task applications, positioning it as a promising solution for next-generation semantic communication networks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08221v1-abstract-full').style.display = 'none'; document.getElementById('2502.08221v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08184">arXiv:2502.08184</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08184">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1002/adma.201903498">10.1002/adma.201903498 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Strong and Tunable Electrical-Anisotropy in Type-II Weyl Semimetal Candidate WP2 with Broken Inversion Symmetry </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Su%2C+B">Bo Su</a>, <a href="/search/?searchtype=author&amp;query=Song%2C+Y">Yanpeng Song</a>, <a href="/search/?searchtype=author&amp;query=Hou%2C+Y">Yanhui Hou</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xu Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+J">Jianzhou Zhao</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+Y">Yongchang Ma</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Y">Yang Yang</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+J">Jiangang Guo</a>, 
<a href="/search/?searchtype=author&amp;query=Luo%2C+J">Jianlin Luo</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Z">Zhi-Guo Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08184v1-abstract-short" style="display: inline;"> A transition metal diphosphide WP2 is a candidate for type-II Weyl semimetals (WSMs) in which spatial inversion symmetry is broken and Lorentz invariance is violated. As one of the key prerequisites for the presence of the WSM state in WP2, spatial inversion symmetry breaking in this compound has rarely been investigated by experiments. Furthermore, how much anisotropy the electrical properties of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08184v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08184v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08184v1-abstract-full" style="display: none;"> A transition metal diphosphide WP2 is a candidate for type-II Weyl semimetals (WSMs) in which spatial inversion symmetry is broken and Lorentz invariance is violated. As one of the key prerequisites for the presence of the WSM state in WP2, spatial inversion symmetry breaking in this compound has rarely been investigated by experiments. Furthermore, how much anisotropy the electrical properties of WP2 have and whether its electrical anisotropy can be tuned remain elusive. Here, we report angle-resolved polarized Raman spectroscopy, electrical transport, optical spectroscopy and first-principle studies of WP2. 
The energies of the observed Raman-active phonons and the angle dependences of the phonon intensities are well consistent with the results obtained by first-principle calculations and the analysis of the proposed crystal symmetry without spatial inversion, providing evidence that spatial inversion symmetry is broken in WP2. Moreover, the measured ratio (Rc/Ra) between the crystalline c-axis and a-axis electrical resistivities exhibits a weak dependence on temperature from 100 to 250 K, but increases abruptly below 100 K, and then reaches the value of 8.0 at 10 K, which is by far the strongest in-plane electrical resistivity anisotropy among the reported type-II WSM candidates with comparable carrier concentrations. Our optical-spectroscopy and calculation studies reveal that the abrupt enhancement of the Rc/Ra below 100 K mainly arises from a sharp increase in the scattering rate anisotropy at low temperatures. More interestingly, the Rc/Ra at 10 K can be tuned from 8.0 to 10.6 as the magnetic field increases from 0 to 9 T. The strong and tunable electrical resistivity anisotropy found in WP2 can serve as a degree of freedom for tuning the electrical properties of type-II WSMs, which paves the way for developing novel electronic applications based on type-II WSMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08184v1-abstract-full').style.display = 'none'; document.getElementById('2502.08184v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in Advanced Materials</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Advanced Materials 31, 1903498 (2019) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08169">arXiv:2502.08169</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08169">pdf</a>, <a href="https://arxiv.org/format/2502.08169">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> CoDynTrust: Robust Asynchronous Collaborative Perception via Dynamic Feature Trust Modulus </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xu%2C+Y">Yunjiang Xu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+L">Lingzhi Li</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+J">Jin Wang</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+B">Benyuan Yang</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Z">Zhiwen Wu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xinhong Chen</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+J">Jianping Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08169v1-abstract-short" style="display: inline;"> Collaborative perception, fusing information from multiple agents, can extend perception range so as to improve perception performance. 
However, temporal asynchrony in real-world environments, caused by communication delays, clock misalignment, or sampling configuration differences, can lead to information mismatches. If this is not well handled, then the collaborative performance is patchy, and w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08169v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08169v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08169v1-abstract-full" style="display: none;"> Collaborative perception, fusing information from multiple agents, can extend perception range so as to improve perception performance. However, temporal asynchrony in real-world environments, caused by communication delays, clock misalignment, or sampling configuration differences, can lead to information mismatches. If this is not well handled, then the collaborative performance is patchy, and what&#39;s worse safety accidents may occur. To tackle this challenge, we propose CoDynTrust, an uncertainty-encoded asynchronous fusion perception framework that is robust to the information mismatches caused by temporal asynchrony. CoDynTrust generates dynamic feature trust modulus (DFTM) for each region of interest by modeling aleatoric and epistemic uncertainty as well as selectively suppressing or retaining single-vehicle features, thereby mitigating information mismatches. We then design a multi-scale fusion module to handle multi-scale feature maps processed by DFTM. Compared to existing works that also consider asynchronous collaborative perception, CoDynTrust combats various low-quality information in temporally asynchronous scenarios and allows uncertainty to be propagated to downstream tasks such as planning and control. 
Experimental results demonstrate that CoDynTrust significantly reduces performance degradation caused by temporal asynchrony across multiple datasets, achieving state-of-the-art detection performance even with temporal asynchrony. The code is available at https://github.com/CrazyShout/CoDynTrust. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08169v1-abstract-full').style.display = 'none'; document.getElementById('2502.08169v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 5 figures, conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08164">arXiv:2502.08164</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08164">pdf</a>, <a href="https://arxiv.org/format/2502.08164">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> </div> </div> <p class="title is-5 mathjax"> Dynamical Models of the Milky Way in Action Space with LAMOST DR8 and GAIA EDR3 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Sun%2C+G">Guang-Chen Sun</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Q">Qiao Wang</a>, <a href="/search/?searchtype=author&amp;query=Mao%2C+S">Shude Mao</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yichao Li</a>, <a href="/search/?searchtype=author&amp;query=Long%2C+R+J">Richard J. 
Long</a>, <a href="/search/?searchtype=author&amp;query=Ding%2C+P">Ping-Jie Ding</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yougang Wang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+X">Xin Zhang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xuelei Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08164v2-abstract-short" style="display: inline;"> This work explores dynamical models of the Milky Way (MW) by analyzing a sample of 86,109 K giant stars selected through cross-matching the LAMOST DR8 and Gaia EDR3 surveys. Our earlier torus models in Wang et al. (2017) did not include Gaia data, making them incompatible with the new proper motion distributions of samples. Here, we refine the construction of action-based, self-consistent models t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08164v2-abstract-full').style.display = 'inline'; document.getElementById('2502.08164v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08164v2-abstract-full" style="display: none;"> This work explores dynamical models of the Milky Way (MW) by analyzing a sample of 86,109 K giant stars selected through cross-matching the LAMOST DR8 and Gaia EDR3 surveys. Our earlier torus models in Wang et al. (2017) did not include Gaia data, making them incompatible with the new proper motion distributions of samples. Here, we refine the construction of action-based, self-consistent models to constrain the three-dimensional velocity distribution of K giants over a larger parameter space, drawing on a series of existing MW models. This approach produces several new MW models. 
Our best-fit model for the local kinematics near the Sun indicates a MW virial mass of 1.35 $\times 10^{12} M_\odot$, a local stellar density of 0.0696 $\rm M_\odot pc^{-3}$, and a local dark matter density of 0.0115 $\rm M_\odot pc^{-3}$. Our main conclusion supports a thicker and more extended thick disk, alongside a cooler thin disk, compared to the best-fitting model in Wang et al. (2017). Near the Sun, our model aligns well with observations, but is less satisfactory at distances far from the Galactic center, perhaps implying unidentified structures. Further high-precision observations will be critical for understanding the dynamics in these outer Galactic regions, and will require a more realistic model. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08164v2-abstract-full').style.display = 'none'; document.getElementById('2502.08164v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 pages, 27 figures, 4 tables. 
Accepted for publication in ApJ</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08083">arXiv:2502.08083</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.08083">pdf</a>, <a href="https://arxiv.org/format/2502.08083">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Mixture of Decoupled Message Passing Experts with Entropy Constraint for General Node Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xuanze Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+J">Jiajun Zhou</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+J">Jinsong Chen</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+S">Shanqing Yu</a>, <a href="/search/?searchtype=author&amp;query=Xuan%2C+Q">Qi Xuan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08083v1-abstract-short" style="display: inline;"> The varying degrees of homophily and heterophily in real-world graphs persistently constrain the universality of graph neural networks (GNNs) for node classification. 
Adopting a data-centric perspective, this work reveals an inherent preference of different graphs towards distinct message encoding schemes: homophilous graphs favor local propagation, while heterophilous graphs exhibit preference fo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08083v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08083v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08083v1-abstract-full" style="display: none;"> The varying degrees of homophily and heterophily in real-world graphs persistently constrain the universality of graph neural networks (GNNs) for node classification. Adopting a data-centric perspective, this work reveals an inherent preference of different graphs towards distinct message encoding schemes: homophilous graphs favor local propagation, while heterophilous graphs exhibit preference for flexible combinations of propagation and transformation. To address this, we propose GNNMoE, a universal node classification framework based on the Mixture-of-Experts (MoE) mechanism. The framework first constructs diverse message-passing experts through recombination of fine-grained encoding operators, then designs soft and hard gating layers to allocate the most suitable expert networks for each node&#39;s representation learning, thereby enhancing both model expressiveness and adaptability to diverse graphs. Furthermore, considering that soft gating might introduce encoding noise in homophilous scenarios, we introduce an entropy constraint to guide sharpening of soft gates, achieving organic integration of weighted combination and Top-K selection. Extensive experiments demonstrate that GNNMoE significantly outperforms mainstream GNNs, heterophilous GNNs, and graph transformers in both node classification performance and universality across diverse graph datasets. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08083v1-abstract-full').style.display = 'none'; document.getElementById('2502.08083v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2412.08193</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.07885">arXiv:2502.07885</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.07885">pdf</a>, <a href="https://arxiv.org/format/2502.07885">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Astrophysical Phenomena">astro-ph.HE</span> </div> </div> <p class="title is-5 mathjax"> A Luminous Red Optical Flare and Hard X-ray Emission in the Tidal Disruption Event AT2024kmq </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ho%2C+A+Y+Q">Anna Y. Q. Ho</a>, <a href="/search/?searchtype=author&amp;query=Yao%2C+Y">Yuhan Yao</a>, <a href="/search/?searchtype=author&amp;query=Matsumoto%2C+T">Tatsuya Matsumoto</a>, <a href="/search/?searchtype=author&amp;query=Schroeder%2C+G">Genevieve Schroeder</a>, <a href="/search/?searchtype=author&amp;query=Coughlin%2C+E">Eric Coughlin</a>, <a href="/search/?searchtype=author&amp;query=Perley%2C+D+A">Daniel A. Perley</a>, <a href="/search/?searchtype=author&amp;query=Andreoni%2C+I">Igor Andreoni</a>, <a href="/search/?searchtype=author&amp;query=Bellm%2C+E+C">Eric C. 
Bellm</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+T+X">Tracy X. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chornock%2C+R">Ryan Chornock</a>, <a href="/search/?searchtype=author&amp;query=Covarrubias%2C+S">Sofia Covarrubias</a>, <a href="/search/?searchtype=author&amp;query=Das%2C+K">Kaustav Das</a>, <a href="/search/?searchtype=author&amp;query=Fremling%2C+C">Christoffer Fremling</a>, <a href="/search/?searchtype=author&amp;query=Gilfanov%2C+M">Marat Gilfanov</a>, <a href="/search/?searchtype=author&amp;query=Hinds%2C+K+R">K. R. Hinds</a>, <a href="/search/?searchtype=author&amp;query=Jarvis%2C+D">Dan Jarvis</a>, <a href="/search/?searchtype=author&amp;query=Kasliwal%2C+M+M">Mansi M. Kasliwal</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+C">Chang Liu</a>, <a href="/search/?searchtype=author&amp;query=Lyman%2C+J+D">Joseph D. Lyman</a>, <a href="/search/?searchtype=author&amp;query=Masci%2C+F+J">Frank J. Masci</a>, <a href="/search/?searchtype=author&amp;query=Prince%2C+T+A">Thomas A. Prince</a>, <a href="/search/?searchtype=author&amp;query=Ravi%2C+V">Vikram Ravi</a>, <a href="/search/?searchtype=author&amp;query=Rich%2C+R+M">R. Michael Rich</a>, <a href="/search/?searchtype=author&amp;query=Riddle%2C+R">Reed Riddle</a>, <a href="/search/?searchtype=author&amp;query=Sevilla%2C+J">Jason Sevilla</a> , et al. (8 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.07885v1-abstract-short" style="display: inline;"> We present the optical discovery and multiwavelength follow-up observations of AT2024kmq, a likely tidal disruption event (TDE) associated with a supermassive ($M_{\rm BH}\sim 10^{8} M_\odot$) black hole in a massive galaxy at $z=0.192$. 
The optical light curve of AT2024kmq exhibits two distinct peaks: an early fast (timescale 1 d) and luminous ($M\approx-20$ mag) red peak, then a slower (timescal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07885v1-abstract-full').style.display = 'inline'; document.getElementById('2502.07885v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.07885v1-abstract-full" style="display: none;"> We present the optical discovery and multiwavelength follow-up observations of AT2024kmq, a likely tidal disruption event (TDE) associated with a supermassive ($M_{\rm BH}\sim 10^{8} M_\odot$) black hole in a massive galaxy at $z=0.192$. The optical light curve of AT2024kmq exhibits two distinct peaks: an early fast (timescale 1 d) and luminous ($M\approx-20$ mag) red peak, then a slower (timescale 1 month) blue peak with a higher optical luminosity ($M\approx-22$ mag) and featureless optical spectra. The second component is similar to the spectroscopic class of &#34;featureless TDEs&#34; in the literature, and during this second component we detect highly variable, luminous ($L_X\approx 10^{44}$ erg s$^{-1}$), and hard ($f_\nu\propto \nu^{-1.5}$) X-ray emission. Luminous ($10^{29}$ erg s$^{-1}$ Hz$^{-1}$ at 10 GHz) but unchanging radio emission likely arises from an underlying active galactic nucleus. The luminosity, timescale, and color of the early red optical peak can be explained by synchrotron emission, or alternatively by thermal emission from material at a large radius ($R\approx\mathrm{few}\times10^{15}$ cm). Possible physical origins for this early red component include an off-axis relativistic jet, and shocks from self-intersecting debris leading to the formation of the accretion disk. Late-time radio observations will help distinguish between the two possibilities. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07885v1-abstract-full').style.display = 'none'; document.getElementById('2502.07885v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages, 7 figures, 6 tables. Submitted to journal on 11 Feb 2025. Comments welcome</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.07590">arXiv:2502.07590</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.07590">pdf</a>, <a href="https://arxiv.org/format/2502.07590">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DSV: Exploiting Dynamic Sparsity to Accelerate Large-Scale Video DiT Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Tan%2C+X">Xin Tan</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yuetao Chen</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+Y">Yimin Jiang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xing Chen</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+K">Kun Yan</a>, <a href="/search/?searchtype=author&amp;query=Duan%2C+N">Nan Duan</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+Y">Yibo Zhu</a>, <a 
href="/search/?searchtype=author&amp;query=Jiang%2C+D">Daxin Jiang</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+H">Hong Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.07590v1-abstract-short" style="display: inline;"> Diffusion Transformers (DiTs) have shown remarkable performance in modeling and generating high-quality videos. However, the quadratic computational complexity of 3D full attention mechanism presents significant challenges in scaling video DiT training, especially for high-definition and lengthy videos, where attention can dominate up to 95% of the end-to-end time and necessitate specialized commu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07590v1-abstract-full').style.display = 'inline'; document.getElementById('2502.07590v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.07590v1-abstract-full" style="display: none;"> Diffusion Transformers (DiTs) have shown remarkable performance in modeling and generating high-quality videos. However, the quadratic computational complexity of 3D full attention mechanism presents significant challenges in scaling video DiT training, especially for high-definition and lengthy videos, where attention can dominate up to 95% of the end-to-end time and necessitate specialized communication paradigms to handle large input sizes. This paper introduces DSV, a novel framework designed to accelerate and scale the training of video DiTs by leveraging the inherent dynamic attention sparsity throughout the training process. DSV employs a two-stage training algorithm that exploits sparsity patterns, focusing on critical elements supported by efficient, tailored kernels. 
To accommodate the new sparsity dimension, we develop a hybrid sparsity-aware context parallelism that effectively scales to large inputs by addressing the heterogeneity of sparsity across attention heads and blocks, resulting in optimized sparse computation and communication. Extensive evaluations demonstrate that DSV achieves up to 3.02x gain in training throughput with nearly no quality degradation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07590v1-abstract-full').style.display = 'none'; document.getElementById('2502.07590v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.07483">arXiv:2502.07483</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.07483">pdf</a>, <a href="https://arxiv.org/ps/2502.07483">ps</a>, <a href="https://arxiv.org/format/2502.07483">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Rings and Algebras">math.RA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Commutative Algebra">math.AC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Representation Theory">math.RT</span> </div> </div> <p class="title is-5 mathjax"> Higher-dimensional module factorizations and complete intersections </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiao-Wu Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark 
mathjax" id="2502.07483v1-abstract-short" style="display: inline;"> We introduce higher-dimensional module factorizations associated to a regular sequence. They include higher-dimensional matrix factorizations, which are commutative cubes consisting of free modules with edges being classical matrix factorizations. We characterize the stable category of maximal Cohen-Macaulay modules over a complete intersection via higher-dimensional matrix factorizations over the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07483v1-abstract-full').style.display = 'inline'; document.getElementById('2502.07483v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.07483v1-abstract-full" style="display: none;"> We introduce higher-dimensional module factorizations associated to a regular sequence. They include higher-dimensional matrix factorizations, which are commutative cubes consisting of free modules with edges being classical matrix factorizations. We characterize the stable category of maximal Cohen-Macaulay modules over a complete intersection via higher-dimensional matrix factorizations over the corresponding regular local ring. The result generalizes to noncommutative rings, including quantum complete intersections. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07483v1-abstract-full').style.display = 'none'; document.getElementById('2502.07483v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Any comments are welcome</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.07406">arXiv:2502.07406</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.07406">pdf</a>, <a href="https://arxiv.org/format/2502.07406">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Search for $e^+e^-\to K_S^0 K_S^0 h_c$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. 
Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. (642 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.07406v1-abstract-short" style="display: inline;"> Using $e^+e^-$ collision data at 13 center-of-mass energies ranging from 4.600 to 4.950 GeV collected with the BESIII detector, we search for the unmeasured $e^+e^-\to K_S^0 K_S^0 h_c$ process. No significant signal is observed, and the upper limits of the Born cross sections at each center-of-mass energy are presented. </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.07406v1-abstract-full" style="display: none;"> Using $e^+e^-$ collision data at 13 center-of-mass energies ranging from 4.600 to 4.950 GeV collected with the BESIII detector, we search for the unmeasured $e^+e^-\to K_S^0 K_S^0 h_c$ process. No significant signal is observed, and the upper limits of the Born cross sections at each center-of-mass energy are presented. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07406v1-abstract-full').style.display = 'none'; document.getElementById('2502.07406v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.07317">arXiv:2502.07317</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.07317">pdf</a>, <a href="https://arxiv.org/format/2502.07317">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Position reconstruction and surface background model for the PandaX-4T detector </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Qian%2C+Z">Zhicheng Qian</a>, <a href="/search/?searchtype=author&amp;query=Gu%2C+L">Linhui Gu</a>, <a href="/search/?searchtype=author&amp;query=Cheng%2C+C">Chen Cheng</a>, <a href="/search/?searchtype=author&amp;query=Bo%2C+Z">Zihao Bo</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+W">Wei Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xun Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yunhua Chen</a>, <a href="/search/?searchtype=author&amp;query=Cheng%2C+Z">Zhaokan Cheng</a>, <a href="/search/?searchtype=author&amp;query=Cui%2C+X">Xiangyi Cui</a>, <a href="/search/?searchtype=author&amp;query=Fan%2C+Y">Yingjie Fan</a>, <a 
href="/search/?searchtype=author&amp;query=Fang%2C+D">Deqing Fang</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+Z">Zhixing Gao</a>, <a href="/search/?searchtype=author&amp;query=Geng%2C+L">Lisheng Geng</a>, <a href="/search/?searchtype=author&amp;query=Giboni%2C+K">Karl Giboni</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+X">Xunan Guo</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+X">Xuyuan Guo</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+Z">Zichao Guo</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+C">Chencheng Han</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+K">Ke Han</a>, <a href="/search/?searchtype=author&amp;query=He%2C+C">Changda He</a>, <a href="/search/?searchtype=author&amp;query=He%2C+J">Jinrong He</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+D">Di Huang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+H">Houqi Huang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+J">Junting Huang</a>, <a href="/search/?searchtype=author&amp;query=Hou%2C+R">Ruquan Hou</a> , et al. (78 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.07317v1-abstract-short" style="display: inline;"> We report the position reconstruction methods and surface background model for the PandaX-4T dark matter direct search experiment. This work develops two position reconstruction algorithms: template matching (TM) method and photon acceptance function (PAF) method. 
Both methods determine the horizontal position of events based on the light pattern of secondary scintillation collected by the light s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07317v1-abstract-full').style.display = 'inline'; document.getElementById('2502.07317v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.07317v1-abstract-full" style="display: none;"> We report the position reconstruction methods and surface background model for the PandaX-4T dark matter direct search experiment. This work develops two position reconstruction algorithms: template matching (TM) method and photon acceptance function (PAF) method. Both methods determine the horizontal position of events based on the light pattern of secondary scintillation collected by the light sensors. After a comprehensive evaluation of resolution, uniformity, and robustness, the PAF method was selected for position reconstruction, while the TM method was employed for verification. The PAF method achieves a bulk event resolution of 1.0 mm and a surface event resolution of 4.4 mm for a typical $S2$ signal with a bottom charge of 1500 PE (about 14 keV). The uniformity is around 20\%. Robustness studies reveal average deviations of 5.1 mm and 8.8 mm for the commissioning run (Run0) and the first science run (Run1), respectively, due to the deactivation of certain PMTs. A data-driven surface background model is developed based on the PAF method. The surface background is estimated to be $0.09 \pm 0.06$ events for Run0 (0.54 tonne$\cdot$year) and $0.17 \pm 0.11$ events for Run1 (1.00 tonne$\cdot$year). 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07317v1-abstract-full').style.display = 'none'; document.getElementById('2502.07317v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 15 figures, 2 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.07056">arXiv:2502.07056</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.07056">pdf</a>, <a href="https://arxiv.org/format/2502.07056">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Autonomous Deep Agent </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Yu%2C+A">Amy Yu</a>, <a href="/search/?searchtype=author&amp;query=Lebedev%2C+E">Erik Lebedev</a>, <a href="/search/?searchtype=author&amp;query=Everett%2C+L">Lincoln Everett</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiaoxin Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+T">Terry Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.07056v1-abstract-short" style="display: inline;"> This technical brief introduces Deep Agent, an advanced 
autonomous AI system designed to manage complex multi-phase tasks through a novel hierarchical task management architecture. The system&#39;s foundation is built on our Hierarchical Task DAG (HTDAG) framework, which dynamically decomposes high-level objectives into manageable sub-tasks while rigorously maintaining dependencies and execution coher&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07056v1-abstract-full').style.display = 'inline'; document.getElementById('2502.07056v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.07056v1-abstract-full" style="display: none;"> This technical brief introduces Deep Agent, an advanced autonomous AI system designed to manage complex multi-phase tasks through a novel hierarchical task management architecture. The system&#39;s foundation is built on our Hierarchical Task DAG (HTDAG) framework, which dynamically decomposes high-level objectives into manageable sub-tasks while rigorously maintaining dependencies and execution coherence. Deep Agent advances beyond traditional agent systems through three key innovations: First, it implements a recursive two-stage planner-executor architecture that enables continuous task refinement and adaptation as circumstances change. Second, it features an Autonomous API &amp; Tool Creation (AATC) system that automatically generates reusable components from UI interactions, substantially reducing operational costs for similar tasks. Third, it incorporates Prompt Tweaking Engine and Autonomous Prompt Feedback Learning components that optimize Large Language Model prompts for specific scenarios, enhancing both inference accuracy and operational stability. These components are integrated to form a service infrastructure that manages user contexts, handles complex task dependencies, and orchestrates end-to-end agentic workflow execution. 
Through this sophisticated architecture, Deep Agent establishes a novel paradigm in self-governing AI systems, demonstrating robust capability to independently handle intricate, multi-step tasks while maintaining consistent efficiency and reliability through continuous self-optimization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.07056v1-abstract-full').style.display = 'none'; document.getElementById('2502.07056v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6; I.2.7 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06904">arXiv:2502.06904</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06904">pdf</a>, <a href="https://arxiv.org/format/2502.06904">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> </div> </div> <p class="title is-5 mathjax"> Distinguishing thermal fluctuations from polaron formation in halide perovskites </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhao%2C+B">Bai-Qing Zhao</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xuan-Yan Chen</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+C">Chuan-Nan Li</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jinshan Li</a>, <a href="/search/?searchtype=author&amp;query=Van+de+Walle%2C+C+G">Chris G. 
Van de Walle</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+X">Xie Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06904v2-abstract-short" style="display: inline;"> Recent angle-resolved photoelectron spectroscopy (ARPES) measurements of the hole effective mass in CsPbBr$_3$ revealed an enhancement of $\sim$50 % compared to the bare mass computed from first principles for CsPbBr$_3$ at $T = 0 K$. This large enhancement was interpreted as evidence of polaron formation. Employing accurate finite-temperature first-principles calculations, we show that the calcul&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06904v2-abstract-full').style.display = 'inline'; document.getElementById('2502.06904v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06904v2-abstract-full" style="display: none;"> Recent angle-resolved photoelectron spectroscopy (ARPES) measurements of the hole effective mass in CsPbBr$_3$ revealed an enhancement of $\sim$50 % compared to the bare mass computed from first principles for CsPbBr$_3$ at $T = 0 K$. This large enhancement was interpreted as evidence of polaron formation. Employing accurate finite-temperature first-principles calculations, we show that the calculated hole effective mass of CsPbBr$_3$ at $T = 300 K$ can explain experimental results without invoking polarons. Thermal fluctuations are particularly strong in halide perovskites compared to conventional semiconductors such as Si and GaAs, and cannot be ignored when comparing with experiment. We not only resolve the debate on polaron formation in halide perovskites, but also demonstrate the general importance of including thermal fluctuations in first-principles calculations for strongly anharmonic materials. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06904v2-abstract-full').style.display = 'none'; document.getElementById('2502.06904v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06838">arXiv:2502.06838</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06838">pdf</a>, <a href="https://arxiv.org/format/2502.06838">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> TorchResist: Open-Source Differentiable Resist Simulator </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+Z">Zixiao Wang</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+J">Jieya Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zheng%2C+S">Su Zheng</a>, <a href="/search/?searchtype=author&amp;query=Yin%2C+S">Shuo Yin</a>, <a href="/search/?searchtype=author&amp;query=Liang%2C+K">Kaichao Liang</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+S">Shoubo Hu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiao Chen</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+B">Bei Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06838v1-abstract-short" style="display: inline;"> Recent decades have 
witnessed remarkable advancements in artificial intelligence (AI), including large language models (LLMs), image and video generative models, and embodied AI systems. These advancements have led to an explosive increase in the demand for computational power, challenging the limits of Moore&#39;s Law. Optical lithography, a critical technology in semiconductor manufacturing, faces s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06838v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06838v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06838v1-abstract-full" style="display: none;"> Recent decades have witnessed remarkable advancements in artificial intelligence (AI), including large language models (LLMs), image and video generative models, and embodied AI systems. These advancements have led to an explosive increase in the demand for computational power, challenging the limits of Moore&#39;s Law. Optical lithography, a critical technology in semiconductor manufacturing, faces significant challenges due to its high costs. To address this, various lithography simulators have been developed. However, many of these simulators are limited by their inadequate photoresist modeling capabilities. This paper presents TorchResist, an open-source, differentiable photoresist simulator. TorchResist employs an analytical approach to model the photoresist process, functioning as a white-box system with at most twenty interpretable parameters. Leveraging modern differentiable programming techniques and parallel computing on GPUs, TorchResist enables seamless co-optimization with other tools across multiple related tasks. Our experimental results demonstrate that TorchResist achieves superior accuracy and efficiency compared to existing solutions. The source code is publicly available. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06838v1-abstract-full').style.display = 'none'; document.getElementById('2502.06838v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">SPIE Advanced Lithography + Patterning, 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06823">arXiv:2502.06823</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06823">pdf</a>, <a href="https://arxiv.org/format/2502.06823">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Graphics">cs.GR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> CTR-Driven Advertising Image Generation with Multimodal Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xingye Chen</a>, <a href="/search/?searchtype=author&amp;query=Feng%2C+W">Wei Feng</a>, <a href="/search/?searchtype=author&amp;query=Du%2C+Z">Zhenbang Du</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+W">Weizhen Wang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yanyin Chen</a>, <a 
href="/search/?searchtype=author&amp;query=Wang%2C+H">Haohan Wang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+L">Linkai Liu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yaoyu Li</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+J">Jinyuan Zhao</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yu Li</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Z">Zheng Zhang</a>, <a href="/search/?searchtype=author&amp;query=Lv%2C+J">Jingjing Lv</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+J">Junjie Shen</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+Z">Zhangang Lin</a>, <a href="/search/?searchtype=author&amp;query=Shao%2C+J">Jingping Shao</a>, <a href="/search/?searchtype=author&amp;query=Shao%2C+Y">Yuanjie Shao</a>, <a href="/search/?searchtype=author&amp;query=You%2C+X">Xinge You</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+C">Changxin Gao</a>, <a href="/search/?searchtype=author&amp;query=Sang%2C+N">Nong Sang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06823v1-abstract-short" style="display: inline;"> In web data, advertising images are crucial for capturing user attention and improving advertising effectiveness. Most existing methods generate background for products primarily focus on the aesthetic quality, which may fail to achieve satisfactory online performance. 
To address this limitation, we explore the use of Multimodal Large Language Models (MLLMs) for generating advertising images by op&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06823v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06823v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06823v1-abstract-full" style="display: none;"> In web data, advertising images are crucial for capturing user attention and improving advertising effectiveness. Most existing methods generate background for products primarily focus on the aesthetic quality, which may fail to achieve satisfactory online performance. To address this limitation, we explore the use of Multimodal Large Language Models (MLLMs) for generating advertising images by optimizing for Click-Through Rate (CTR) as the primary objective. Firstly, we build targeted pre-training tasks, and leverage a large-scale e-commerce multimodal dataset to equip MLLMs with initial capabilities for advertising image generation tasks. To further improve the CTR of generated images, we propose a novel reward model to fine-tune pre-trained MLLMs through Reinforcement Learning (RL), which can jointly utilize multimodal features and accurately reflect user click preferences. Meanwhile, a product-centric preference optimization strategy is developed to ensure that the generated background content aligns with the product characteristics after fine-tuning, enhancing the overall relevance and effectiveness of the advertising images. Extensive experiments have demonstrated that our method achieves state-of-the-art performance in both online and offline metrics. Our code and pre-trained models are publicly available at: https://github.com/Chenguoz/CAIG. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06823v1-abstract-full').style.display = 'none'; document.getElementById('2502.06823v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to WWW 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06490">arXiv:2502.06490</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06490">pdf</a>, <a href="https://arxiv.org/format/2502.06490">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Recent Advances in Discrete Speech Tokens: A Review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Guo%2C+Y">Yiwei Guo</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Z">Zhihan Li</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+H">Hankun Wang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+B">Bohan Li</a>, <a 
href="/search/?searchtype=author&amp;query=Shao%2C+C">Chongtian Shao</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+H">Hanglei Zhang</a>, <a href="/search/?searchtype=author&amp;query=Du%2C+C">Chenpeng Du</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xie Chen</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+S">Shujie Liu</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+K">Kai Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06490v1-abstract-short" style="display: inline;"> The rapid advancement of speech generation technologies in the era of large language models (LLMs) has established discrete speech tokens as a foundational paradigm for speech representation. These tokens, characterized by their discrete, compact, and concise nature, are not only advantageous for efficient transmission and storage, but also inherently compatible with the language modeling framewor&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06490v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06490v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06490v1-abstract-full" style="display: none;"> The rapid advancement of speech generation technologies in the era of large language models (LLMs) has established discrete speech tokens as a foundational paradigm for speech representation. These tokens, characterized by their discrete, compact, and concise nature, are not only advantageous for efficient transmission and storage, but also inherently compatible with the language modeling framework, enabling seamless integration of speech into text-dominated LLM architectures. 
Current research categorizes discrete speech tokens into two principal classes: acoustic tokens and semantic tokens, each of which has evolved into a rich research domain characterized by unique design philosophies and methodological approaches. This survey systematically synthesizes the existing taxonomy and recent innovations in discrete speech tokenization, conducts a critical examination of the strengths and limitations of each paradigm, and presents systematic experimental comparisons across token types. Furthermore, we identify persistent challenges in the field and propose potential research directions, aiming to offer actionable insights to inspire future advancements in the development and application of discrete speech tokens. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06490v1-abstract-full').style.display = 'none'; document.getElementById('2502.06490v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">26 pages, 8 figures, 3 tables. 
Work in progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06453">arXiv:2502.06453</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06453">pdf</a>, <a href="https://arxiv.org/format/2502.06453">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> MATH-Perturb: Benchmarking LLMs&#39; Math Reasoning Abilities against Hard Perturbations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+K">Kaixuan Huang</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+J">Jiacheng Guo</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Z">Zihao Li</a>, <a href="/search/?searchtype=author&amp;query=Ji%2C+X">Xiang Ji</a>, <a href="/search/?searchtype=author&amp;query=Ge%2C+J">Jiawei Ge</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+W">Wenzhe Li</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+Y">Yingqing Guo</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+T">Tianle Cai</a>, <a href="/search/?searchtype=author&amp;query=Yuan%2C+H">Hui Yuan</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+R">Runzhe Wang</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Y">Yue Wu</a>, <a href="/search/?searchtype=author&amp;query=Yin%2C+M">Ming Yin</a>, <a href="/search/?searchtype=author&amp;query=Tang%2C+S">Shange Tang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+Y">Yangsibo Huang</a>, <a href="/search/?searchtype=author&amp;query=Jin%2C+C">Chi Jin</a>, <a 
href="/search/?searchtype=author&amp;query=Chen%2C+X">Xinyun Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+C">Chiyuan Zhang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+M">Mengdi Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06453v2-abstract-short" style="display: inline;"> Large language models have demonstrated impressive performance on challenging mathematical reasoning tasks, which has triggered the discussion of whether the performance is achieved by true reasoning capability or memorization. To investigate this question, prior work has constructed mathematical benchmarks when questions undergo simple perturbations -- modifications that still preserve the underl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06453v2-abstract-full').style.display = 'inline'; document.getElementById('2502.06453v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06453v2-abstract-full" style="display: none;"> Large language models have demonstrated impressive performance on challenging mathematical reasoning tasks, which has triggered the discussion of whether the performance is achieved by true reasoning capability or memorization. To investigate this question, prior work has constructed mathematical benchmarks when questions undergo simple perturbations -- modifications that still preserve the underlying reasoning patterns of the solutions. However, no work has explored hard perturbations, which fundamentally change the nature of the problem so that the original solution steps do not apply. To bridge the gap, we construct MATH-P-Simple and MATH-P-Hard via simple perturbation and hard perturbation, respectively. 
Each consists of 279 perturbed math problems derived from level-5 (hardest) problems in the MATH dataset (Hendrycksmath et al., 2021). We observe significant performance drops on MATH-P-Hard across various models, including o1-mini (-16.49%) and gemini-2.0-flash-thinking (-12.9%). We also raise concerns about a novel form of memorization where models blindly apply learned problem-solving skills without assessing their applicability to modified contexts. This issue is amplified when using original problems for in-context learning. We call for research efforts to address this challenge, which is critical for developing more robust and reliable reasoning models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06453v2-abstract-full').style.display = 'none'; document.getElementById('2502.06453v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">v2: fix bugs in Fig. 
1</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06168">arXiv:2502.06168</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06168">pdf</a>, <a href="https://arxiv.org/format/2502.06168">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Econometrics">econ.EM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div> <p class="title is-5 mathjax"> Dynamic Pricing with Adversarially-Censored Demands </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xu%2C+J">Jianyu Xu</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yining Wang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xi Chen</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yu-Xiang Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06168v1-abstract-short" style="display: inline;"> We study an online dynamic pricing problem where the potential demand at each time period $t=1,2,\ldots, T$ is stochastic and dependent on the price. However, a perishable inventory is imposed at the beginning of each time $t$, censoring the potential demand if it exceeds the inventory level. 
To address this problem, we introduce a pricing algorithm based on the optimistic estimates of derivatives&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06168v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06168v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06168v1-abstract-full" style="display: none;"> We study an online dynamic pricing problem where the potential demand at each time period $t=1,2,\ldots, T$ is stochastic and dependent on the price. However, a perishable inventory is imposed at the beginning of each time $t$, censoring the potential demand if it exceeds the inventory level. To address this problem, we introduce a pricing algorithm based on the optimistic estimates of derivatives. We show that our algorithm achieves $\tilde{O}(\sqrt{T})$ optimal regret even with adversarial inventory series. Our findings advance the state-of-the-art in online decision-making problems with censored feedback, offering a theoretically optimal solution against adversarial observations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06168v1-abstract-full').style.display = 'none'; document.getElementById('2502.06168v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">33 pages, 1 figure</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 91B06; 91B24; 62P20; 62C20; 90B50 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06160">arXiv:2502.06160</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06160">pdf</a>, <a href="https://arxiv.org/format/2502.06160">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Astrophysical Phenomena">astro-ph.HE</span> </div> </div> <p class="title is-5 mathjax"> Impact of mass transfer on the orbital evolution of a white dwarf close to an intermediate-mass black hole </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Yang%2C+Y">Yang Yang</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+J">Jie Yang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xian Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Z">Zihan Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06160v1-abstract-short" style="display: inline;"> Extreme mass ratio inspiral (EMRI) systems composed of low-mass white dwarfs (WDs, $0.1 - 0.3$ $\mathrm{M}_{\odot } $) and intermediate-mass black holes (IMBHs, $10^{3} - 10^{5}$ $\mathrm{M}_{\odot } $) are ideal objects for multi-messenger astronomy because they produce both gravitational wave (GW) and electromagnetic (EM) signals. 
Both relativistic effects and the mass transfer (MT) process are&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06160v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06160v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06160v1-abstract-full" style="display: none;"> Extreme mass ratio inspiral (EMRI) systems composed of low-mass white dwarfs (WDs, $0.1 - 0.3$ $\mathrm{M}_{\odot } $) and intermediate-mass black holes (IMBHs, $10^{3} - 10^{5}$ $\mathrm{M}_{\odot } $) are ideal objects for multi-messenger astronomy because they produce both gravitational wave (GW) and electromagnetic (EM) signals. Both relativistic effects and the mass transfer (MT) process are important for determining orbital dynamics, but the current model has not taken these ingredients fully into account. Here we use a perturbed Keplerian framework and the post-Newtonian (PN) formalism to model the relativistic orbit of a WD around a spinning IMBH. We pay special attention to the dynamical evolution during a narrow phase near the orbital pericenter where the WD fills the Roche lobe and starts MT. We find that gravitational radiation and MT have opposing effects on orbital evolution. When MT predominates, the orbital period and eccentricity may increase, sometimes enabling the WD to escape and avoid tidal disruption. Additionally, we estimate the time required for the GW phase to shift by one radian due to the MT process and identify cases where this phase shift will be detectable by future GW observations. The temporal expansion of the orbit during MT offers a potential explanation for the disappearance of quasi-periodic eruptions (QPEs) found in several X-ray transients, highlighting the importance of including both the relativistic and MT processes in the WD-IMBH model. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06160v1-abstract-full').style.display = 'none'; document.getElementById('2502.06160v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 11 figures, prepared for submission to MNRAS</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06101">arXiv:2502.06101</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06101">pdf</a>, <a href="https://arxiv.org/format/2502.06101">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> RALLRec: Improving Retrieval Augmented Large Language Model Recommendation with Representation Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xu%2C+J">Jian Xu</a>, <a href="/search/?searchtype=author&amp;query=Luo%2C+S">Sichun Luo</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiangyu Chen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+H">Haoming Huang</a>, <a href="/search/?searchtype=author&amp;query=Hou%2C+H">Hanxu Hou</a>, <a href="/search/?searchtype=author&amp;query=Song%2C+L">Linqi Song</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: 
<span class="abstract-short has-text-grey-dark mathjax" id="2502.06101v2-abstract-short" style="display: inline;"> Large Language Models (LLMs) have been integrated into recommendation systems to enhance user behavior comprehension. The Retrieval Augmented Generation (RAG) technique is further incorporated into these systems to retrieve more relevant items and improve system performance. However, existing RAG methods rely primarily on textual semantics and often fail to incorporate the most relevant items, lim&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06101v2-abstract-full').style.display = 'inline'; document.getElementById('2502.06101v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06101v2-abstract-full" style="display: none;"> Large Language Models (LLMs) have been integrated into recommendation systems to enhance user behavior comprehension. The Retrieval Augmented Generation (RAG) technique is further incorporated into these systems to retrieve more relevant items and improve system performance. However, existing RAG methods rely primarily on textual semantics and often fail to incorporate the most relevant items, limiting the effectiveness of the systems. In this paper, we propose Representation learning for retrieval-Augmented Large Language model Recommendation (RALLRec). Specifically, we enhance textual semantics by prompting LLMs to generate more detailed item descriptions, followed by joint representation learning of textual and collaborative semantics, which are extracted by the LLM and recommendation models, respectively. Considering the potential time-varying characteristics of user interest, a simple yet effective reranking method is further introduced to capture the dynamics of user preference. 
We conducted extensive experiments on three real-world datasets, and the evaluation results validated the effectiveness of our method. Code is made public at https://github.com/JianXu95/RALLRec. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06101v2-abstract-full').style.display = 'none'; document.getElementById('2502.06101v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by TheWebConf&#39;25 (WWW&#39;25) as a Short Paper</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06024">arXiv:2502.06024</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.06024">pdf</a>, <a href="https://arxiv.org/ps/2502.06024">ps</a>, <a href="https://arxiv.org/format/2502.06024">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> Improved Sublinear Algorithms for Classical and Quantum Graph Coloring </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ferber%2C+A">Asaf Ferber</a>, <a href="/search/?searchtype=author&amp;query=Hardiman%2C+L">Liam Hardiman</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiaonan Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06024v1-abstract-short" style="display: inline;"> We present three sublinear randomized algorithms for vertex-coloring of graphs with maximum degree $Δ$. The first is a simple algorithm that extends the idea of Morris and Song to color graphs with maximum degree $Δ$ using $Δ+1$ colors. Combined with the greedy algorithm, it achieves an expected runtime of $O(n^{3/2}\sqrt{\log n})$ in the query model, improving on Assadi, Chen, and Khanna&#39;s algori&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06024v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06024v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06024v1-abstract-full" style="display: none;"> We present three sublinear randomized algorithms for vertex-coloring of graphs with maximum degree $Δ$. The first is a simple algorithm that extends the idea of Morris and Song to color graphs with maximum degree $Δ$ using $Δ+1$ colors. Combined with the greedy algorithm, it achieves an expected runtime of $O(n^{3/2}\sqrt{\log n})$ in the query model, improving on Assadi, Chen, and Khanna&#39;s algorithm by a $\sqrt{\log n}$ factor in expectation. When we allow quantum queries to the graph, we can accelerate the first algorithm using Grover&#39;s famous algorithm, resulting in a runtime of $\tilde{O}(n^{4/3})$ quantum queries. Finally, we introduce a quantum algorithm for $(1+ε)Δ$-coloring, achieving $O(ε^{-1}n^{5/4}\log^{3/2}n)$ quantum queries, offering a polynomial improvement over the previous best bound by Morris and Song. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06024v1-abstract-full').style.display = 'none'; document.getElementById('2502.06024v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.05764">arXiv:2502.05764</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.05764">pdf</a>, <a href="https://arxiv.org/ps/2502.05764">ps</a>, <a href="https://arxiv.org/format/2502.05764">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Populations and Evolution">q-bio.PE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.chaos.2025.116070">10.1016/j.chaos.2025.116070 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Coevolutionary dynamics of feedback-evolving games in structured populations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+Q">Qiushuang Wang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiaojie Chen</a>, <a href="/search/?searchtype=author&amp;query=Szolnoki%2C+A">Attila Szolnoki</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.05764v1-abstract-short" style="display: inline;"> The interdependence between an 
individual strategy decision and the resulting change of environmental state is often a subtle process. Feedback-evolving games have been a prevalent framework for studying such feedback in well-mixed populations, yielding important insights into the coevolutionary dynamics. However, since real populations are usually structured, it is essential to explore how popula&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05764v1-abstract-full').style.display = 'inline'; document.getElementById('2502.05764v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.05764v1-abstract-full" style="display: none;"> The interdependence between an individual strategy decision and the resulting change of environmental state is often a subtle process. Feedback-evolving games have been a prevalent framework for studying such feedback in well-mixed populations, yielding important insights into the coevolutionary dynamics. However, since real populations are usually structured, it is essential to explore how population structure affects such coevolutionary dynamics. Our work proposes a coevolution model of strategies and environmental state in a structured population depicted by a regular graph. We investigate the system dynamics, and theoretically demonstrate that there exist different evolutionary outcomes including oscillation, bistability, the coexistence of oscillation and dominance, as well as the coexistence of cooperation and defection. Our theoretical predictions are validated through numerical calculations. By using Monte Carlo simulations we examine how the number of neighbors influences the coevolutionary dynamics, particularly the size of the attractive domain of the replete environmental state in the cases of bistability or cooperation-defection coexistence. 
Specifically, in the case of bistability, a larger neighborhood size may be beneficial to save the environment when the environmental enhancement rate by cooperation / degradation rate by defection is high. Conversely, if this ratio is low, a smaller neighborhood size is more beneficial. In the case of cooperator-defector coexistence, environmental maintenance is basically influenced by individual payoffs. When the ratio of temptation minus reward versus punishment minus sucker&#39;s payoff is high, a larger neighborhood size is more favorable. In contrast, when the mentioned ratio is low, a smaller neighborhood size is more advantageous. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05764v1-abstract-full').style.display = 'none'; document.getElementById('2502.05764v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Wang, Q., Chen, X. and Szolnoki, A., 2025. Coevolutionary dynamics of feedback-evolving games in structured populations. 
Chaos, Solitons &amp; Fractals, 193, p.116070 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.05587">arXiv:2502.05587</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.05587">pdf</a>, <a href="https://arxiv.org/format/2502.05587">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> </div> </div> <p class="title is-5 mathjax"> The Host Galaxy of the Hyperactive Repeating FRB 20240114A: Behind a Galaxy Cluster </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiang-Lei Chen</a>, <a href="/search/?searchtype=author&amp;query=Tsai%2C+C">Chao-Wei Tsai</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+D">Di Li</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+P">Pei Wang</a>, <a href="/search/?searchtype=author&amp;query=Feng%2C+Y">Yi Feng</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+J">Jun-Shuo Zhang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+G">Guo-Dong Li</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Yong-Kun Zhang</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+L">Lu-Lu Bao</a>, <a href="/search/?searchtype=author&amp;query=Liao%2C+M">Mai Liao</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Lu-Dan Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zuo%2C+P">Pei Zuo</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+D">Dong-Wei Bao</a>, <a href="/search/?searchtype=author&amp;query=Niu%2C+C">Chen-Hui Niu</a>, <a href="/search/?searchtype=author&amp;query=Luo%2C+R">Rui Luo</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+W">Wei-Wei Zhu</a>, <a href="/search/?searchtype=author&amp;query=Zou%2C+H">Hu Zou</a>, <a href="/search/?searchtype=author&amp;query=Xue%2C+S">Sui-Jian 
Xue</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+B">Bing Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.05587v1-abstract-short" style="display: inline;"> We report on the optical spectroscopic observations of the host galaxy of the hyperactive repeating fast radio burst, FRB 20240114A. The host galaxy is a dwarf galaxy at a redshift of $z=0.1306\pm0.0002$. With a rest-frame coverage of 4300-7900 Å, we have detected H$\rmα$, H$\rmβ$, [O III]$λλ$4959,5007, [N II]$λλ$6548,6583, and [S II]$λ$6716 emission lines. The emission line ratios suggest that th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05587v1-abstract-full').style.display = 'inline'; document.getElementById('2502.05587v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.05587v1-abstract-full" style="display: none;"> We report on the optical spectroscopic observations of the host galaxy of the hyperactive repeating fast radio burst, FRB 20240114A. The host galaxy is a dwarf galaxy at a redshift of $z=0.1306\pm0.0002$. With a rest-frame coverage of 4300-7900 Å, we have detected H$\rmα$, H$\rmβ$, [O III]$λλ$4959,5007, [N II]$λλ$6548,6583, and [S II]$λ$6716 emission lines. The emission line ratios suggest that the ionization in the host galaxy is dominated by star formation. The star formation rate (SFR) derived from the H$\rmα$ emission line is $(0.06 \pm 0.01) \ \rm{M_{\odot} \ yr^{-1}}$, and the SED fitting suggests the lower limit of the SFR(UV) is $0.09 \ \rm{M_{\odot} \ yr^{-1}}$. The stellar mass is $(\rm 4.0 \pm 1.8) \times 10^8 \ M_{\odot}$, making the specific star formation rate $\rm log \ sSFR(H\rm α) = -9.17 \pm 0.07 \ yr^{-1}$. 
The line ratios indicate an upper limit of a metallicity of $\rm 12+log_{10} ([O/H]) \sim 8.5$. As the nearest dwarf host galaxy with a repeating FRB, the activity of FRB 20240114A and the properties of this host galaxy closely resemble those of FRB 20121102A and FRB 20190520B. The H$\rmα$-traced dispersion measure (DM) provided by the ionized gas of the host galaxy has a moderate contribution of $\sim 200 \rm \ pc \ cm^{-3}$, assuming a warm ionized gas. We found that the distributions of the stellar mass versus SFR are significantly different between repeating and one-off FRBs, as determined by the MANOVA test with $p=0.0116$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05587v1-abstract-full').style.display = 'none'; document.getElementById('2502.05587v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ApJL</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.05566">arXiv:2502.05566</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.05566">pdf</a>, <a href="https://arxiv.org/ps/2502.05566">ps</a>, <a href="https://arxiv.org/format/2502.05566">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div> <p class="title is-5 mathjax"> Dynamic Systems Coupled with Solutions of Stochastic Nonsmooth Convex Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Luo%2C+J">Jianfeng Luo</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiaojun Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.05566v1-abstract-short" style="display: inline;"> In this paper, we study ordinary differential equations (ODE) coupled with solutions of a stochastic nonsmooth convex optimization problem (SNCOP). We use the regularization approach, the sample average approximation and the time-stepping method to construct discrete approximation problems. We show the existence of solutions to the original problem and the discrete problems. 
Moreover, we show that&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05566v1-abstract-full').style.display = 'inline'; document.getElementById('2502.05566v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.05566v1-abstract-full" style="display: none;"> In this paper, we study ordinary differential equations (ODE) coupled with solutions of a stochastic nonsmooth convex optimization problem (SNCOP). We use the regularization approach, the sample average approximation and the time-stepping method to construct discrete approximation problems. We show the existence of solutions to the original problem and the discrete problems. Moreover, we show that the optimal solution of the SNCOP with a strong convex objective function admits a linear growth condition and the optimal solution of the regularized SNCOP converges to the least-norm solution of the original SNCOP, which are crucial for us to derive the convergence results of the discrete problems. We illustrate the theoretical results and applications for the estimation of the time-varying parameters in ODE by numerical examples. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05566v1-abstract-full').style.display = 'none'; document.getElementById('2502.05566v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 90C15; 90C33; 90C39 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04877">arXiv:2502.04877</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.04877">pdf</a>, <a href="https://arxiv.org/format/2502.04877">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/MVT.2025.3531088">10.1109/MVT.2025.3531088 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Terahertz Integrated Sensing and Communication-Empowered UAVs in 6G: A Transceiver Design Perspective </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhang%2C+R">Ruoyu Zhang</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+W">Wen Wu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiaoming Chen</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+Z">Zhen Gao</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+Y">Yueming Cai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04877v1-abstract-short" style="display: inline;"> Due to their high maneuverability, flexible deployment, and low cost, unmanned aerial vehicles (UAVs) are expected to play a pivotal role in not only communication, but also sensing. 
Especially by exploiting the ultra-wide bandwidth of terahertz (THz) bands, integrated sensing and communication (ISAC)-empowered UAV has been a promising technology of 6G space-air-ground integrated networks. In this&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04877v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04877v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04877v1-abstract-full" style="display: none;"> Due to their high maneuverability, flexible deployment, and low cost, unmanned aerial vehicles (UAVs) are expected to play a pivotal role in not only communication, but also sensing. Especially by exploiting the ultra-wide bandwidth of terahertz (THz) bands, integrated sensing and communication (ISAC)-empowered UAV has been a promising technology of 6G space-air-ground integrated networks. In this article, we systematically investigate the key techniques and essential obstacles for THz-ISAC-empowered UAV from a transceiver design perspective, with the highlight of its major challenges and key technologies. Specifically, we discuss the THz-ISAC-UAV wireless propagation environment, based on which several channel characteristics for communication and sensing are revealed. We point out the transceiver payload design peculiarities for THz-ISAC-UAV from the perspective of antenna design, radio frequency front-end, and baseband signal processing. To deal with the specificities faced by the payload, we shed light on three key technologies, i.e., hybrid beamforming for ultra-massive MIMO-ISAC, power-efficient THz-ISAC waveform design, as well as communication and sensing channel state information acquisition, and extensively elaborate their concepts and key issues. 
More importantly, future research directions and associated open problems are presented, which may unleash the full potential of THz-ISAC-UAV for 6G wireless networks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04877v1-abstract-full').style.display = 'none'; document.getElementById('2502.04877v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Vehicular Technology Magazine, 2025 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04848">arXiv:2502.04848</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.04848">pdf</a>, <a href="https://arxiv.org/format/2502.04848">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Astrophysical Phenomena">astro-ph.HE</span> </div> </div> <p class="title is-5 mathjax"> Broadband $γ$-ray spectrum of supernova remnant Cassiopeia A </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Cao%2C+Z">Zhen Cao</a>, <a href="/search/?searchtype=author&amp;query=Aharonian%2C+F">F. Aharonian</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y+X">Y. X. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+Y+W">Y. W. Bao</a>, <a href="/search/?searchtype=author&amp;query=Bastieri%2C+D">D. Bastieri</a>, <a href="/search/?searchtype=author&amp;query=Bi%2C+X+J">X. J. Bi</a>, <a href="/search/?searchtype=author&amp;query=Bi%2C+Y+J">Y. J. 
Bi</a>, <a href="/search/?searchtype=author&amp;query=Bian%2C+W">W. Bian</a>, <a href="/search/?searchtype=author&amp;query=Bukevich%2C+A+V">A. V. Bukevich</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+C+M">C. M. Cai</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+W+Y">W. Y. Cao</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+Z">Zhe Cao</a>, <a href="/search/?searchtype=author&amp;query=Chang%2C+J">J. Chang</a>, <a href="/search/?searchtype=author&amp;query=Chang%2C+J+F">J. F. Chang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+A+M">A. M. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+E+S">E. S. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+H+X">H. X. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+L">Liang Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+L">Long Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+M+J">M. J. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+M+L">M. L. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Q+H">Q. H. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+S">S. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+S+H">S. H. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+S+Z">S. Z. Chen</a> , et al. (293 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04848v1-abstract-short" style="display: inline;"> The core-collapse supernova remnant (SNR) Cassiopeia A (Cas A) is one of the brightest galactic radio sources with an angular radius of $\sim$ 2.5 $\arcmin$. 
Although no extension of this source has been detected in the $γ$-ray band, using more than 1000 days of LHAASO data above $\sim 0.8$ TeV, we find that its spectrum is significantly softer than those obtained with Imaging Air Cherenkov Telesc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04848v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04848v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04848v1-abstract-full" style="display: none;"> The core-collapse supernova remnant (SNR) Cassiopeia A (Cas A) is one of the brightest galactic radio sources with an angular radius of $\sim$ 2.5 $\arcmin$. Although no extension of this source has been detected in the $γ$-ray band, using more than 1000 days of LHAASO data above $\sim 0.8$ TeV, we find that its spectrum is significantly softer than those obtained with Imaging Air Cherenkov Telescopes (IACTs) and its flux near $\sim 1$ TeV is about two times higher. In combination with analyses of more than 16 years of \textit{Fermi}-LAT data covering $0.1 \, \mathrm{GeV} - 1 \, \mathrm{TeV}$, we find that the spectrum above 30 GeV deviates significantly from a single power-law, and is best described by a smoothly broken power-law with a spectral index of $1.90 \pm 0.15_\mathrm{stat}$ ($3.41 \pm 0.19_\mathrm{stat}$) below (above) a break energy of $0.63 \pm 0.21_\mathrm{stat} \, \mathrm{TeV}$. Given differences in the angular resolution of LHAASO-WCDA and IACTs, TeV $γ$-ray emission detected with LHAASO may have a significant contribution from regions surrounding the SNR illuminated by particles accelerated earlier, which, however, are treated as background by IACTs. Detailed modelling can be used to constrain acceleration processes of TeV particles in the early stage of SNR evolution. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04848v1-abstract-full').style.display = 'none'; document.getElementById('2502.04848v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04822">arXiv:2502.04822</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.04822">pdf</a>, <a href="https://arxiv.org/format/2502.04822">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Fluid Dynamics">physics.flu-dyn</span> </div> </div> <p class="title is-5 mathjax"> Multiscale circulation in wall-parallel planes of turbulent channel flows </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Duan%2C+P">Peng-Yu Duan</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xi Chen</a>, <a href="/search/?searchtype=author&amp;query=Sreenivasan%2C+K+R">Katepalli R. Sreenivasan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04822v1-abstract-short" style="display: inline;"> Wall turbulence consists of various sizes of vortical structures that induce flow circulation around a wide range of closed Eulerian loops. Here we investigate the multiscale properties of circulation around such loops in statistically homogeneous planes parallel to the wall. 
Using a high-resolution direct numerical simulation database of turbulent channels at Reynolds numbers of $Re_τ=180$, 550,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04822v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04822v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04822v1-abstract-full" style="display: none;"> Wall turbulence consists of various sizes of vortical structures that induce flow circulation around a wide range of closed Eulerian loops. Here we investigate the multiscale properties of circulation around such loops in statistically homogeneous planes parallel to the wall. Using a high-resolution direct numerical simulation database of turbulent channels at Reynolds numbers of $Re_τ=180$, 550, 1000 and 5200, circulation statistics are obtained in planes at different wall-normal heights. Intermittency of circulation in the planes of the outer flow ($y^+ \gtrsim 0.1Re_τ$) takes the form of universal bifractality as in homogeneous and isotropic turbulence. The bifractal character simplifies to space-filling character close to the wall, with scaling exponents that are linear in the moment order, and lower than those given by the Kolmogorov paradigm. The probability density functions of circulation are long-tailed in the outer bifractal region, {with evidence showing their invariance with respect to the loop aspect ratio}, while those in the inner region are closely Gaussian. The unifractality near the wall implies that the circulation there is not intermittent in character. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04822v1-abstract-full').style.display = 'none'; document.getElementById('2502.04822v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04684">arXiv:2502.04684</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.04684">pdf</a>, <a href="https://arxiv.org/format/2502.04684">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> G2PDiffusion: Genotype-to-Phenotype Prediction with Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+M">Mengdi Liu</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+Z">Zhangyang Gao</a>, <a href="/search/?searchtype=author&amp;query=Chang%2C+H">Hong Chang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+S+Z">Stan Z. 
Li</a>, <a href="/search/?searchtype=author&amp;query=Shan%2C+S">Shiguang Shan</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xilin Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04684v2-abstract-short" style="display: inline;"> Discovering the genotype-phenotype relationship is crucial for genetic engineering, which will facilitate advances in fields such as crop breeding, conservation biology, and personalized medicine. Current research usually focuses on single species and small datasets due to limitations in phenotypic data collection, especially for traits that require visual assessments or physical measurements. Dec&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04684v2-abstract-full').style.display = 'inline'; document.getElementById('2502.04684v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04684v2-abstract-full" style="display: none;"> Discovering the genotype-phenotype relationship is crucial for genetic engineering, which will facilitate advances in fields such as crop breeding, conservation biology, and personalized medicine. Current research usually focuses on single species and small datasets due to limitations in phenotypic data collection, especially for traits that require visual assessments or physical measurements. Deciphering complex and composite phenotypes, such as morphology, from genetic data at scale remains an open question. To break through traditional generic models that rely on simplified assumptions, this paper introduces G2PDiffusion, the first-of-its-kind diffusion model designed for genotype-to-phenotype generation across multiple species. 
Specifically, we use images to represent morphological phenotypes across species and redefine phenotype prediction as conditional image generation. To this end, this paper introduces an environment-enhanced DNA sequence conditioner and trains a stable diffusion model with a novel alignment method to improve genotype-to-phenotype consistency. Extensive experiments demonstrate that our approach enhances phenotype prediction accuracy across species, capturing subtle genetic variations that contribute to observable traits. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04684v2-abstract-full').style.display = 'none'; document.getElementById('2502.04684v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04602">arXiv:2502.04602</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.04602">pdf</a>, <a href="https://arxiv.org/format/2502.04602">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Extracting and Understanding the Superficial Knowledge in Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+R">Runjin Chen</a>, <a href="/search/?searchtype=author&amp;query=Perin%2C+G+J">Gabriel Jacob Perin</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xuxi Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xilun Chen</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+Y">Yan Han</a>, <a href="/search/?searchtype=author&amp;query=Hirata%2C+N+S+T">Nina S. T. Hirata</a>, <a href="/search/?searchtype=author&amp;query=Hong%2C+J">Junyuan Hong</a>, <a href="/search/?searchtype=author&amp;query=Kailkhura%2C+B">Bhavya Kailkhura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04602v1-abstract-short" style="display: inline;"> Alignment of large language models (LLMs) with human values and preferences, often achieved through fine-tuning based on human feedback, is essential for ensuring safe and responsible AI behaviors. However, the process typically requires substantial data and computation resources. 
Recent studies have revealed that alignment might be attainable at lower costs through simpler methods, such as in-con&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04602v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04602v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04602v1-abstract-full" style="display: none;"> Alignment of large language models (LLMs) with human values and preferences, often achieved through fine-tuning based on human feedback, is essential for ensuring safe and responsible AI behaviors. However, the process typically requires substantial data and computation resources. Recent studies have revealed that alignment might be attainable at lower costs through simpler methods, such as in-context learning. This leads to the question: Is alignment predominantly superficial? In this paper, we delve into this question and provide a quantitative analysis. We formalize the concept of superficial knowledge, defining it as knowledge that can be acquired through easily token restyling, without affecting the model&#39;s ability to capture underlying causal relationships between tokens. We propose a method to extract and isolate superficial knowledge from aligned models, focusing on the shallow modifications to the final token selection process. By comparing models augmented only with superficial knowledge to fully aligned models, we quantify the superficial portion of alignment. Our findings reveal that while superficial knowledge constitutes a significant portion of alignment, particularly in safety and detoxification tasks, it is not the whole story. Tasks requiring reasoning and contextual understanding still rely on deeper knowledge. 
Additionally, we demonstrate two practical advantages of isolated superficial knowledge: (1) it can be transferred between models, enabling efficient offsite alignment of larger models using extracted superficial knowledge from smaller models, and (2) it is recoverable, allowing for the restoration of alignment in compromised models without sacrificing performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04602v1-abstract-full').style.display = 'none'; document.getElementById('2502.04602v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04511">arXiv:2502.04511</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.04511">pdf</a>, <a href="https://arxiv.org/format/2502.04511">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Beyond Sample-Level Feedback: Using Reference-Level Feedback to Guide Data Synthesis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Mehri%2C+S">Shuhaib Mehri</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiusi Chen</a>, <a href="/search/?searchtype=author&amp;query=Ji%2C+H">Heng Ji</a>, <a href="/search/?searchtype=author&amp;query=Hakkani-T%C3%BCr%2C+D">Dilek Hakkani-Tür</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04511v1-abstract-short" 
style="display: inline;"> LLMs demonstrate remarkable capabilities in following natural language instructions, largely due to instruction-tuning on high-quality datasets. While synthetic data generation has emerged as a scalable approach for creating such datasets, maintaining consistent quality standards remains challenging. Recent approaches incorporate feedback to improve data quality, but typically operate at the sampl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04511v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04511v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04511v1-abstract-full" style="display: none;"> LLMs demonstrate remarkable capabilities in following natural language instructions, largely due to instruction-tuning on high-quality datasets. While synthetic data generation has emerged as a scalable approach for creating such datasets, maintaining consistent quality standards remains challenging. Recent approaches incorporate feedback to improve data quality, but typically operate at the sample level, generating and applying feedback for each response individually. In this work, we propose Reference-Level Feedback, a novel methodology that instead collects feedback based on high-quality reference samples from carefully curated seed data. We use this feedback to capture rich signals of desirable characteristics that can be propagated to newly synthesized data. We present REFED, a dataset of 10K instruction-response pairs synthesized using such feedback. We demonstrate the effectiveness of our approach by showing that Llama-3.1-8B-Instruct finetuned on REFED achieves state-of-the-art performance among similar-sized SFT-based models on AlpacaEval 2.0 and strong results on Arena-Hard. 
Through extensive experiments, we show that our approach consistently outperforms traditional sample-level feedback methods with significantly fewer feedback collections and improves performance across different model architectures. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04511v1-abstract-full').style.display = 'none'; document.getElementById('2502.04511v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04399">arXiv:2502.04399</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.04399">pdf</a>, <a href="https://arxiv.org/format/2502.04399">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Online Location Planning for AI-Defined Vehicles: Optimizing Joint Tasks of Order Serving and Spatio-Temporal Heterogeneous Model Fine-Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zheng%2C+B">Bokeng Zheng</a>, <a href="/search/?searchtype=author&amp;query=Rao%2C+B">Bo Rao</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+T">Tianxiang Zhu</a>, <a href="/search/?searchtype=author&amp;query=Tan%2C+C+W">Chee Wei Tan</a>, <a href="/search/?searchtype=author&amp;query=Duan%2C+J">Jingpu Duan</a>, <a 
href="/search/?searchtype=author&amp;query=Zhou%2C+Z">Zhi Zhou</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xu Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+X">Xiaoxi Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04399v1-abstract-short" style="display: inline;"> Advances in artificial intelligence (AI) including foundation models (FMs), are increasingly transforming human society, with smart city driving the evolution of urban living.Meanwhile, vehicle crowdsensing (VCS) has emerged as a key enabler, leveraging vehicles&#39; mobility and sensor-equipped capabilities. In particular, ride-hailing vehicles can effectively facilitate flexible data collection and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04399v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04399v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04399v1-abstract-full" style="display: none;"> Advances in artificial intelligence (AI) including foundation models (FMs), are increasingly transforming human society, with smart city driving the evolution of urban living.Meanwhile, vehicle crowdsensing (VCS) has emerged as a key enabler, leveraging vehicles&#39; mobility and sensor-equipped capabilities. In particular, ride-hailing vehicles can effectively facilitate flexible data collection and contribute towards urban intelligence, despite resource limitations. Therefore, this work explores a promising scenario, where edge-assisted vehicles perform joint tasks of order serving and the emerging foundation model fine-tuning using various urban data. 
However, integrating the VCS AI task with the conventional order serving task is challenging, due to their inconsistent spatio-temporal characteristics: (i) The distributions of ride orders and data point-of-interests (PoIs) may not coincide in geography, both following a priori unknown patterns; (ii) they have distinct forms of temporal effects, i.e., prolonged waiting makes orders become instantly invalid while data with increased staleness gradually reduces its utility for model fine-tuning.To overcome these obstacles, we propose an online framework based on multi-agent reinforcement learning (MARL) with careful augmentation. A new quality-of-service (QoS) metric is designed to characterize and balance the utility of the two joint tasks, under the effects of varying data volumes and staleness. We also integrate graph neural networks (GNNs) with MARL to enhance state representations, capturing graph-structured, time-varying dependencies among vehicles and across locations. Extensive experiments on our testbed simulator, utilizing various real-world foundation model fine-tuning tasks and the New York City Taxi ride order dataset, demonstrate the advantage of our proposed method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04399v1-abstract-full').style.display = 'none'; document.getElementById('2502.04399v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04296">arXiv:2502.04296</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.04296">pdf</a>, <a href="https://arxiv.org/format/2502.04296">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Learning Real-World Action-Video Dynamics with Heterogeneous Masked Autoregression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+L">Lirui Wang</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+K">Kevin Zhao</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+C">Chaoqi Liu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xinlei Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04296v1-abstract-short" style="display: inline;"> We propose Heterogeneous Masked Autoregression (HMA) for modeling action-video dynamics to generate high-quality data and evaluation in scaling robot learning. Building interactive video world models and policies for robotics is difficult due to the challenge of handling diverse settings while maintaining computational efficiency to run in real time. 
HMA uses heterogeneous pre-training from observ&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04296v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04296v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04296v1-abstract-full" style="display: none;"> We propose Heterogeneous Masked Autoregression (HMA) for modeling action-video dynamics to generate high-quality data and evaluation in scaling robot learning. Building interactive video world models and policies for robotics is difficult due to the challenge of handling diverse settings while maintaining computational efficiency to run in real time. HMA uses heterogeneous pre-training from observations and action sequences across different robotic embodiments, domains, and tasks. HMA uses masked autoregression to generate quantized or soft tokens for video predictions. \ourshort achieves better visual fidelity and controllability than the previous robotic video generation models with 15 times faster speed in the real world. After post-training, this model can be used as a video simulator from low-level action inputs for evaluating policies and generating synthetic data. See this link https://liruiw.github.io/hma for more information. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04296v1-abstract-full').style.display = 'none'; document.getElementById('2502.04296v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Website: https://liruiw.github.io/hma/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03914">arXiv:2502.03914</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.03914">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> A Flexible FBG-Based Contact Force Sensor for Robotic Gripping Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Lai%2C+W">Wenjie Lai</a>, <a href="/search/?searchtype=author&amp;query=Nguyen%2C+H+D">Huu Duoc Nguyen</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+J">Jiajun Liu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xingyu Chen</a>, <a href="/search/?searchtype=author&amp;query=Phee%2C+S+J">Soo Jay Phee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03914v1-abstract-short" style="display: inline;"> Soft robotic grippers demonstrate great potential for gently and safely handling objects; however, their full potential for executing precise and secure grasping has been limited by the lack of integrated sensors, leading to problems such as slippage and excessive force exertion. 
To address this challenge, we present a small and highly sensitive Fiber Bragg Grating-based force sensor designed for&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03914v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03914v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03914v1-abstract-full" style="display: none;"> Soft robotic grippers demonstrate great potential for gently and safely handling objects; however, their full potential for executing precise and secure grasping has been limited by the lack of integrated sensors, leading to problems such as slippage and excessive force exertion. To address this challenge, we present a small and highly sensitive Fiber Bragg Grating-based force sensor designed for accurate contact force measurement. The flexible force sensor comprises a 3D-printed TPU casing with a small bump and uvula structure, a dual FBG array, and a protective tube. A series of tests have been conducted to evaluate the effectiveness of the proposed force sensor, including force calibration, repeatability test, hysteresis study, force measurement comparison, and temperature calibration and compensation tests. The results demonstrated good repeatability, with a force measurement range of 4.69 N, a high sensitivity of approximately 1169.04 pm/N, a root mean square error (RMSE) of 0.12 N, and a maximum hysteresis of 4.83%. When compared to a commercial load cell, the sensor showed a percentage error of 2.56% and an RMSE of 0.14 N. Besides, the proposed sensor validated its temperature compensation effectiveness, with a force RMSE of 0.01 N over a temperature change of 11 Celsius degree. The sensor was integrated with a soft grow-and-twine gripper to monitor interaction forces between different objects and the robotic gripper. 
Closed-loop force control was applied during automated pick-and-place tasks and significantly improved gripping stability, as demonstrated in tests. This force sensor can be used across manufacturing, agriculture, healthcare (like prosthetic hands), logistics, and packaging, to provide situation awareness and higher operational efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03914v1-abstract-full').style.display = 'none'; document.getElementById('2502.03914v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03828">arXiv:2502.03828</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.03828">pdf</a>, <a href="https://arxiv.org/ps/2502.03828">ps</a>, <a href="https://arxiv.org/format/2502.03828">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Observation of $D^+\to \bar K_1(1270)^0μ^+ν_μ$ and $D^0\to K_1(1270)^-μ^+ν_μ$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C.
Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. 
(646 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03828v1-abstract-short" style="display: inline;"> By analyzing 7.93 $\rm fb^{-1}$ of $e^+e^-$ collision data collected at the center-of-mass energy of 3.773 GeV with the BESIII detector operated at the BEPCII collider, we report the observation of the semimuonic decays of $D^+\to \bar K_1(1270)^0μ^+ν_μ$ and $D^0\to K_1(1270)^-μ^+ν_μ$ with statistical significances of $12.5σ$ and $6.0σ$, respectively. Their decay branching fractions are determined&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03828v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03828v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03828v1-abstract-full" style="display: none;"> By analyzing 7.93 $\rm fb^{-1}$ of $e^+e^-$ collision data collected at the center-of-mass energy of 3.773 GeV with the BESIII detector operated at the BEPCII collider, we report the observation of the semimuonic decays of $D^+\to \bar K_1(1270)^0μ^+ν_μ$ and $D^0\to K_1(1270)^-μ^+ν_μ$ with statistical significances of $12.5σ$ and $6.0σ$, respectively. Their decay branching fractions are determined to be ${\mathcal B}[D^{+}\to \bar{K}_1(1270)^0 μ^{+}ν_μ]=(2.36\pm0.20^{+0.18}_{-0.27}\pm 0.48)\times10^{-3}$ and ${\mathcal B}[D^{0}\to K_1(1270)^{-} μ^{+}ν_μ]=(0.78\pm0.11^{+0.05}_{-0.09}\pm 0.15)\times10^{-3}$, where the first and second uncertainties are statistical and systematic, respectively, and the third originates from the input branching fraction of $\bar K_{1}(1270)^0\to K^- π^+π^0$ or $K_1(1270)^-\to K^-π^+π^-$.
Combining our branching fractions with the previous measurements of ${\mathcal B}[D^+\to \bar K_1(1270)^0e^+ν_{e}]$ and ${\mathcal B}[D^0\to K_1(1270)^-e^+ν_{e}]$, we determine the branching fraction ratios to be ${\mathcal B}[D^+\to \bar K_1(1270)^0μ^+ν_μ]/{\mathcal B}[D^+\to \bar K_1(1270)^0e^+ν_{e}]=1.03 \pm 0.14 \substack{+0.11\\-0.15}$ and ${\mathcal B}[D^0\to K_1(1270)^-μ^+ν_μ]/{\mathcal B}[D^0\to K_1(1270)^-e^+ν_{e}]=0.74\pm 0.13 \substack{+0.08\\-0.13}$. Using the branching fractions measured in this work and the world-average lifetimes of the $D^+$ and $D^0$ mesons, we determine the semimuonic partial decay width ratio to be $Γ[D^+\to \bar K_1(1270)^0 μ^+ν_μ]/Γ[D^0\to K_1(1270)^- μ^+ν_μ]=1.22\pm 0.10\substack{+0.06\\-0.09}$, which is consistent with unity as predicted by isospin conservation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03828v1-abstract-full').style.display = 'none'; document.getElementById('2502.03828v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03574">arXiv:2502.03574</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.03574">pdf</a>, <a href="https://arxiv.org/ps/2502.03574">ps</a>, <a href="https://arxiv.org/format/2502.03574">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> Pandora with Inaccurate Priors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Banihashem%2C+K">Kiarash Banihashem</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiang Chen</a>, <a href="/search/?searchtype=author&amp;query=Hajiaghayi%2C+M">MohammadTaghi Hajiaghayi</a>, <a href="/search/?searchtype=author&amp;query=Kim%2C+S">Sungchul Kim</a>, <a href="/search/?searchtype=author&amp;query=Mahadik%2C+K">Kanak Mahadik</a>, <a href="/search/?searchtype=author&amp;query=Rossi%2C+R">Ryan Rossi</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+T">Tong Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03574v1-abstract-short" style="display: inline;"> We investigate the role of inaccurate priors for the classical Pandora&#39;s box problem. In the classical Pandora&#39;s box problem we are given a set of boxes each with a known cost and an unknown value sampled from a known distribution. We investigate how inaccuracies in the beliefs can affect existing algorithms. 
Specifically, we assume that the knowledge of the underlying distribution has a small err&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03574v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03574v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03574v1-abstract-full" style="display: none;"> We investigate the role of inaccurate priors for the classical Pandora&#39;s box problem. In the classical Pandora&#39;s box problem we are given a set of boxes each with a known cost and an unknown value sampled from a known distribution. We investigate how inaccuracies in the beliefs can affect existing algorithms. Specifically, we assume that the knowledge of the underlying distribution has a small error in the Kolmogorov distance, and study how this affects the utility obtained by the optimal algorithm. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03574v1-abstract-full').style.display = 'none'; document.getElementById('2502.03574v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03229">arXiv:2502.03229</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.03229">pdf</a>, <a href="https://arxiv.org/format/2502.03229">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Unified Framework for Semi-Supervised Image Segmentation and Registration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+R">Ruizhe Li</a>, <a href="/search/?searchtype=author&amp;query=Figueredo%2C+G">Grazziela Figueredo</a>, <a href="/search/?searchtype=author&amp;query=Auer%2C+D">Dorothee Auer</a>, <a href="/search/?searchtype=author&amp;query=Dineen%2C+R">Rob Dineen</a>, <a href="/search/?searchtype=author&amp;query=Morgan%2C+P">Paul Morgan</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xin Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03229v1-abstract-short" style="display: inline;"> Semi-supervised learning, which leverages both annotated and unannotated data, is an efficient approach for medical image segmentation, where obtaining annotations for the whole dataset is time-consuming and costly. Traditional semi-supervised methods primarily focus on extracting features and learning data distributions from unannotated data to enhance model training. 
In this paper, we introduce&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03229v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03229v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03229v1-abstract-full" style="display: none;"> Semi-supervised learning, which leverages both annotated and unannotated data, is an efficient approach for medical image segmentation, where obtaining annotations for the whole dataset is time-consuming and costly. Traditional semi-supervised methods primarily focus on extracting features and learning data distributions from unannotated data to enhance model training. In this paper, we introduce a novel approach incorporating an image registration model to generate pseudo-labels for the unannotated data, producing more geometrically correct pseudo-labels to improve the model training. Our method was evaluated on a 2D brain data set, showing excellent performance even using only 1\% of the annotated data. The results show that our approach outperforms conventional semi-supervised segmentation methods (e.g. teacher-student model), particularly in a low percentage of annotation scenario. GitHub: https://github.com/ruizhe-l/UniSegReg. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03229v1-abstract-full').style.display = 'none'; document.getElementById('2502.03229v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at IEEE International Symposium on Biomedical Imaging (ISBI) 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03104">arXiv:2502.03104</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.03104">pdf</a>, <a href="https://arxiv.org/format/2502.03104">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Bellman Error Centering </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xingguo Chen</a>, <a href="/search/?searchtype=author&amp;query=Gong%2C+Y">Yu Gong</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+S">Shangdong Yang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+W">Wenhao Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03104v1-abstract-short" style="display: inline;"> This paper revisits the recently proposed reward centering algorithms including simple reward centering (SRC) and value-based reward centering (VRC), and points out that SRC is indeed the reward centering, while VRC is essentially Bellman error centering (BEC). 
Based on BEC, we provide the centered fixpoint for tabular value functions, as well as the centered TD fixpoint for linear value function&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03104v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03104v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03104v1-abstract-full" style="display: none;"> This paper revisits the recently proposed reward centering algorithms including simple reward centering (SRC) and value-based reward centering (VRC), and points out that SRC is indeed the reward centering, while VRC is essentially Bellman error centering (BEC). Based on BEC, we provide the centered fixpoint for tabular value functions, as well as the centered TD fixpoint for linear value function approximation. We design the on-policy CTD algorithm and the off-policy CTDC algorithm, and prove the convergence of both algorithms. Finally, we experimentally validate the stability of our proposed algorithms. Bellman error centering facilitates the extension to various reinforcement learning algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03104v1-abstract-full').style.display = 'none'; document.getElementById('2502.03104v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03072">arXiv:2502.03072</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.03072">pdf</a>, <a href="https://arxiv.org/format/2502.03072">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> RoboGrasp: A Universal Grasping Policy for Robust Robotic Control </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+Y">Yiqi Huang</a>, <a href="/search/?searchtype=author&amp;query=Davies%2C+T">Travis Davies</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+J">Jiahuan Yan</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiang Chen</a>, <a href="/search/?searchtype=author&amp;query=Tian%2C+Y">Yu Tian</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+L">Luhui Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03072v1-abstract-short" style="display: inline;"> Imitation learning and world models have shown significant promise in advancing generalizable robotic learning, with robotic grasping remaining a critical challenge for achieving precise manipulation. Existing methods often rely heavily on robot arm state data and RGB images, leading to overfitting to specific object shapes or positions. 
To address these limitations, we propose RoboGrasp, a univer&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03072v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03072v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03072v1-abstract-full" style="display: none;"> Imitation learning and world models have shown significant promise in advancing generalizable robotic learning, with robotic grasping remaining a critical challenge for achieving precise manipulation. Existing methods often rely heavily on robot arm state data and RGB images, leading to overfitting to specific object shapes or positions. To address these limitations, we propose RoboGrasp, a universal grasping policy framework that integrates pretrained grasp detection models with robotic learning. By leveraging robust visual guidance from object detection and segmentation tasks, RoboGrasp significantly enhances grasp precision, stability, and generalizability, achieving up to 34% higher success rates in few-shot learning and grasping box prompt tasks. Built on diffusion-based methods, RoboGrasp is adaptable to various robotic learning paradigms, enabling precise and reliable manipulation across diverse and complex scenarios. This framework represents a scalable and versatile solution for tackling real-world challenges in robotic grasping. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03072v1-abstract-full').style.display = 'none'; document.getElementById('2502.03072v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03017">arXiv:2502.03017</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.03017">pdf</a>, <a href="https://arxiv.org/format/2502.03017">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Nuclear Experiment">nucl-ex</span> </div> </div> <p class="title is-5 mathjax"> Search for Double Beta Decay of $^{136}$Xe to the $0^+_1$ Excited State of $^{136}$Ba with PandaX-4T </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=PandaX+Collaboration"> PandaX Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Luo%2C+L">Lingyin Luo</a>, <a href="/search/?searchtype=author&amp;query=Bo%2C+Z">Zihao Bo</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+W">Wei Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xun Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yunhua Chen</a>, <a href="/search/?searchtype=author&amp;query=Cheng%2C+Z">Zhaokan Cheng</a>, <a href="/search/?searchtype=author&amp;query=Cui%2C+X">Xiangyi Cui</a>, <a href="/search/?searchtype=author&amp;query=Fang%2C+Y">Yingji Fang</a>, <a href="/search/?searchtype=author&amp;query=Fang%2C+D">Deqing Fang</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+Z">Zhixing Gao</a>, <a href="/search/?searchtype=author&amp;query=Geng%2C+L">Lisheng Geng</a>, <a href="/search/?searchtype=author&amp;query=Giboni%2C+K">Karl Giboni</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+X">Xunan Guo</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+X">Xuyuan Guo</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+Z">Zichao Guo</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+C">Chencheng Han</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+K">Ke Han</a>, <a 
href="/search/?searchtype=author&amp;query=He%2C+C">Changda He</a>, <a href="/search/?searchtype=author&amp;query=He%2C+J">Jinrong He</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+D">Di Huang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+H">Houqi Huang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+J">Junting Huang</a>, <a href="/search/?searchtype=author&amp;query=Hou%2C+R">Ruquan Hou</a>, <a href="/search/?searchtype=author&amp;query=Hou%2C+Y">Yu Hou</a> , et al. (76 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03017v2-abstract-short" style="display: inline;"> We perform a search of double beta decay of $^{136}$Xe to the excited state, $0^+_1$, of $^{136}$Ba (2$νββ$-0$_1^+$), using the dual-phase xenon detector of PandaX-4T with the first 94.9-day commissioning data. The multi-site events are reconstructed up to the MeV energy scale, which helps to improve the background model significantly. The background contribution from the stainless steel platform&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03017v2-abstract-full').style.display = 'inline'; document.getElementById('2502.03017v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03017v2-abstract-full" style="display: none;"> We perform a search of double beta decay of $^{136}$Xe to the excited state, $0^+_1$, of $^{136}$Ba (2$νββ$-0$_1^+$), using the dual-phase xenon detector of PandaX-4T with the first 94.9-day commissioning data. The multi-site events are reconstructed up to the MeV energy scale, which helps to improve the background model significantly. The background contribution from the stainless steel platform outside PandaX-4T cryostat is evaluated for the first time.
No significant evidence for 2$νββ$-0$_1^+$ is observed, resulting in a lower limit of $T_{1/2}^{2 νββ-0_1^+} &gt; 7.5 \times 10^{22}$ yr at the 90% confidence level. This is the first experimental limit on such a rare decay in a natural xenon-based detector. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03017v2-abstract-full').style.display = 'none'; document.getElementById('2502.03017v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.02995">arXiv:2502.02995</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.02995">pdf</a>, <a href="https://arxiv.org/format/2502.02995">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Astrophysical Phenomena">astro-ph.HE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="General Relativity and Quantum Cosmology">gr-qc</span> </div> </div> <p class="title is-5 mathjax"> Probing intermediate-mass black hole binaries with the Lunar Gravitational-wave Antenna </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Song%2C+H">Hanlin Song</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+H">Han Yan</a>, <a href="/search/?searchtype=author&amp;query=Kang%2C+Y">Yacheng Kang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xian Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+J">Junjie Zhao</a>, <a
href="/search/?searchtype=author&amp;query=Shao%2C+L">Lijing Shao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.02995v1-abstract-short" style="display: inline;"> New concepts for observing the gravitational waves (GWs) using a detector on the Moon, such as the Lunar Gravitational-wave Antenna (LGWA), have gained increasing attention. By utilizing the Moon as a giant antenna, the LGWA is expected to detect GWs in the frequency range from 1 millihertz (mHz) to several hertz, with optimal sensitivity in the decihertz band. Despite the debated formation and ev&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.02995v1-abstract-full').style.display = 'inline'; document.getElementById('2502.02995v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.02995v1-abstract-full" style="display: none;"> New concepts for observing the gravitational waves (GWs) using a detector on the Moon, such as the Lunar Gravitational-wave Antenna (LGWA), have gained increasing attention. By utilizing the Moon as a giant antenna, the LGWA is expected to detect GWs in the frequency range from 1 millihertz (mHz) to several hertz, with optimal sensitivity in the decihertz band. Despite the debated formation and evolution channel of intermediate-mass black holes (IMBHs) with masses in the range of $[10^2, 10^5]\ {\rm M_\odot}$, binary systems containing at least one IMBH are widely believed to generate GWs spanning from mHz to a few Hz, making them a key scientific target for the LGWA. We explore the detectability of IMBH binaries with the LGWA in this work. The LGWA is more sensitive to nearby binaries (i.e. with redshift $z\lesssim0.5$) with the primary mass $m_1 \in [10^4, 10^5] \ {\rm M_\odot}$, while it prefers distant binaries (i.e. 
$z \gtrsim 5$) with $m_1 \in [10^3, 10^4] \ {\rm M_\odot}$. Considering a signal-to-noise ratio threshold of 10, our results imply that the LGWA can detect IMBH binaries up to $z \sim \mathcal{O}(10)$. We further show that the LGWA can constrain the primary mass with relative errors $\lesssim 0.1\%$ for binaries at $z \lesssim 0.5$. Furthermore, we show that the IMBH binaries at $z \lesssim 0.1$ can be used to constrain redshift with relative errors $\lesssim 10\%$, and those with $m_1 \in [10^4, 10^5] \ {\rm M_\odot}$ can be localized by the LGWA to be within $\mathcal{O} (10)$ $\rm deg^2$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.02995v1-abstract-full').style.display = 'none'; document.getElementById('2502.02995v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 7 figures</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=0" class="pagination-link is-current" aria-label="Go to page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=100" class="pagination-link " aria-label="Page 3">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=150" class="pagination-link " aria-label="Page 4">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Chen%2C+X&amp;start=200" class="pagination-link " aria-label="Page 5">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <!-- target="_blank" anchors to external origins carry rel="noopener" so the opened page cannot script the opener window. --> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank" rel="noopener">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 
0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank" rel="noopener"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank" rel="noopener"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 
47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10