
Search | arXiv e-print repository

Showing 1-50 of 1,889 results for author: Fu, Y
1. arXiv:2502.08929 [pdf, ps, other]  hep-ex
Precise Measurement of the $χ_{c0}$ Resonance Parameters and Branching Fractions of $χ_{c0,c2}\to π^+π^-/K^+K^-$
Authors: BESIII Collaboration, M. Ablikim, M. N. Achasov, P. Adlarson, O. Afedulidis, X. C. Ai, R. Aliberti, A. Amoroso, Y. Bai, O. Bakina, I. Balossino, Y. Ban, H. -R. Bao, V. Batozskaya, K. Begzsuren, N. Berger, M. Berlowski, M. Bertani, D. Bettoni, F. Bianchi, E. Bianco, A. Bortone, I. Boyko, R. A. Briere, A. Brueggemann, et al. (648 additional authors not shown)
Abstract: By analyzing a $ψ(3686)$ data sample containing $(107.7\pm0.6)\times10^{6}$ events taken with the BESIII detector at the BEPCII storage ring in 2009, the $χ_{c0}$ resonance parameters are precisely measured using $χ_{c0,c2} \to π^+π^-/K^+K^-$ events. The mass of $χ_{c0}$ is determined to be $M(χ_{c0})=(3415.67\pm0.07\pm0.06\pm0.07)$ MeV/$c^2$, and its full width is $Γ(χ_{c0})=(12.44\pm0.12\pm0.12)~{\rm MeV}$, where the first uncertainty is statistical, the second systematic, and the third for the mass comes from the $χ_{c2}$ mass uncertainty. These measurements improve the precision of the $χ_{c0}$ mass by a factor of four and the width by one order of magnitude over the previous individual measurements, and significantly boost our knowledge about the charmonium spectrum. Together with additional $(345.4\pm2.6)\times10^{6}$ $ψ(3686)$ data events taken in 2012, the decay branching fractions of $χ_{c0,c2}\to π^+π^-/K^+K^-$ are measured as well, with precision improved by a factor of three compared to previous measurements. These $χ_{c0}$ decay branching fractions provide important inputs for the study of glueballs.
Submitted 12 February, 2025; originally announced February 2025.
Comments: 9 pages, 1 figure

2. arXiv:2502.08828 [pdf, other]  cs.LG; cs.AI
A Survey on Data-Centric AI: Tabular Learning from Reinforcement Learning and Generative AI Perspective
Authors: Wangyang Ying, Cong Wei, Nanxu Gong, Xinyuan Wang, Haoyue Bai, Arun Vignesh Malarkkan, Sixun Dong, Dongjie Wang, Denghui Zhang, Yanjie Fu
Abstract: Tabular data is one of the most widely used data formats across various domains such as bioinformatics, healthcare, and marketing. As artificial intelligence moves towards a data-centric perspective, improving data quality is essential for enhancing model performance in tabular data-driven applications. This survey focuses on data-driven tabular data optimization, specifically exploring reinforcement learning (RL) and generative approaches for feature selection and feature generation as fundamental techniques for refining data spaces. Feature selection aims to identify and retain the most informative attributes, while feature generation constructs new features to better capture complex data patterns. We systematically review existing generative methods for tabular data engineering, analyzing their latest advancements, real-world applications, and respective strengths and limitations. This survey emphasizes how RL-based and generative techniques contribute to the automation and intelligence of feature engineering. Finally, we summarize the existing challenges and discuss future research directions, aiming to provide insights that drive continued innovation in this field.
Submitted 12 February, 2025; originally announced February 2025.
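The survey above frames feature selection as a sequential decision process. As a rough, self-contained illustration of that framing only (not any method covered by the survey), the toy loop below toggles features in and out of a subset and uses downstream cross-validation accuracy as the reward signal; the dataset, model, acceptance rule, and step budget are arbitrary choices.

import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Toy sketch, not a surveyed algorithm: state = current feature subset,
# action = toggle one feature, reward = cross-validated accuracy downstream.
rng = np.random.default_rng(0)
X, y = load_breast_cancer(return_X_y=True)
n_features = X.shape[1]

def reward(mask):
    """Score a feature subset by the CV accuracy of a simple classifier."""
    if not mask.any():
        return 0.0
    model = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
    return cross_val_score(model, X[:, mask], y, cv=3).mean()

mask = np.ones(n_features, dtype=bool)          # start from all features
cur_r = reward(mask)
best_mask, best_r = mask.copy(), cur_r

for step in range(60):
    idx = rng.integers(n_features)              # action: flip one feature in/out
    candidate = mask.copy()
    candidate[idx] = not candidate[idx]
    r = reward(candidate)
    if r >= cur_r or rng.random() < 0.1:        # greedy acceptance plus a little exploration
        mask, cur_r = candidate, r
    if r > best_r:                              # remember the best subset seen
        best_mask, best_r = candidate.copy(), r

print(f"kept {best_mask.sum()}/{n_features} features, CV accuracy ~ {best_r:.3f}")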
3. arXiv:2502.08449 [pdf, other]  cs.RO; cs.AI
CordViP: Correspondence-based Visuomotor Policy for Dexterous Manipulation in Real-World
Authors: Yankai Fu, Qiuxuan Feng, Ning Chen, Zichen Zhou, Mengzhen Liu, Mingdong Wu, Tianxing Chen, Shanyu Rong, Jiaming Liu, Hao Dong, Shanghang Zhang
Abstract: Achieving human-level dexterity in robots is a key objective in the field of robotic manipulation. Recent advancements in 3D-based imitation learning have shown promising results, providing an effective pathway to achieve this goal. However, obtaining high-quality 3D representations presents two key problems: (1) the quality of point clouds captured by a single-view camera is significantly affected by factors such as camera resolution, positioning, and occlusions caused by the dexterous hand; (2) the global point clouds lack crucial contact information and spatial correspondences, which are necessary for fine-grained dexterous manipulation tasks. To eliminate these limitations, we propose CordViP, a novel framework that constructs and learns correspondences by leveraging the robust 6D pose estimation of objects and robot proprioception. Specifically, we first introduce the interaction-aware point clouds, which establish correspondences between the object and the hand. These point clouds are then used for our pre-training policy, where we also incorporate object-centric contact maps and hand-arm coordination information, effectively capturing both spatial and temporal dynamics. Our method demonstrates exceptional dexterous manipulation capabilities with an average success rate of 90% in four real-world tasks, surpassing other baselines by a large margin. Experimental results also highlight the superior generalization and robustness of CordViP to different objects, viewpoints, and scenarios. Code and videos are available on https://aureleopku.github.io/CordViP.
Submitted 12 February, 2025; originally announced February 2025.
4. arXiv:2502.07531 [pdf, other]  cs.CV; cs.AI; cs.LG; cs.MM
VidCRAFT3: Camera, Object, and Lighting Control for Image-to-Video Generation
Authors: Sixiao Zheng, Zimian Peng, Yanpeng Zhou, Yi Zhu, Hang Xu, Xiangru Huang, Yanwei Fu
Abstract: Recent image-to-video generation methods have demonstrated success in enabling control over one or two visual elements, such as camera trajectory or object motion. However, these methods are unable to offer control over multiple visual elements due to limitations in data and network efficacy. In this paper, we introduce VidCRAFT3, a novel framework for precise image-to-video generation that enables control over camera motion, object motion, and lighting direction simultaneously. To better decouple control over each visual element, we propose the Spatial Triple-Attention Transformer, which integrates lighting direction, text, and image in a symmetric way. Since most real-world video datasets lack lighting annotations, we construct a high-quality synthetic video dataset, the VideoLightingDirection (VLD) dataset. This dataset includes lighting direction annotations and objects of diverse appearance, enabling VidCRAFT3 to effectively handle strong light transmission and reflection effects. Additionally, we propose a three-stage training strategy that eliminates the need for training data annotated with multiple visual elements (camera motion, object motion, and lighting direction) simultaneously. Extensive experiments on benchmark datasets demonstrate the efficacy of VidCRAFT3 in producing high-quality video content, surpassing existing state-of-the-art methods in terms of control granularity and visual coherence. All code and data will be publicly available.
Submitted 12 February, 2025; v1 submitted 11 February, 2025; originally announced February 2025.
5. arXiv:2502.07406 [pdf, other]  hep-ex
Search for $e^+e^-\to K_S^0 K_S^0 h_c$
Authors: BESIII Collaboration, M. Ablikim, M. N. Achasov, P. Adlarson, O. Afedulidis, X. C. Ai, R. Aliberti, A. Amoroso, Q. An, Y. Bai, O. Bakina, I. Balossino, Y. Ban, H. -R. Bao, V. Batozskaya, K. Begzsuren, N. Berger, M. Berlowski, M. Bertani, D. Bettoni, F. Bianchi, E. Bianco, A. Bortone, I. Boyko, R. A. Briere, et al. (642 additional authors not shown)
Abstract: Using $e^+e^-$ collision data at 13 center-of-mass energies ranging from 4.600 to 4.950 GeV collected with the BESIII detector, we search for the unmeasured $e^+e^-\to K_S^0 K_S^0 h_c$ process. No significant signal is observed, and the upper limits of the Born cross sections at each center-of-mass energy are presented.
Submitted 11 February, 2025; originally announced February 2025.

6. arXiv:2502.07351 [pdf, other]  cs.CV; cs.AI
Multi-Task-oriented Nighttime Haze Imaging Enhancer for Vision-driven Measurement Systems
Authors: Ai Chen, Yuxu Lu, Dong Yang, Junlin Zhou, Yan Fu, Duanbing Chen
Abstract: Salient object detection (SOD) plays a critical role in vision-driven measurement systems (VMS), facilitating the detection and segmentation of key visual elements in an image. However, adverse imaging conditions such as haze during the day, low light, and haze at night severely degrade image quality, complicating the SOD process. To address these challenges, we propose a multi-task-oriented nighttime haze imaging enhancer (MToIE), which integrates three tasks: daytime dehazing, low-light enhancement, and nighttime dehazing. MToIE incorporates two key innovative components. First, the network employs a task-oriented node learning mechanism to handle three specific degradation types: day-time haze, low light, and night-time haze conditions, with an embedded self-attention module enhancing its performance in nighttime imaging. Second, a multi-receptive-field enhancement module efficiently extracts multi-scale features through three parallel depthwise separable convolution branches with different dilation rates, capturing comprehensive spatial information with minimal computational overhead. To ensure optimal image reconstruction quality and visual characteristics, we suggest a hybrid loss function. Extensive experiments on different types of weather/imaging conditions illustrate that MToIE surpasses existing methods, significantly enhancing the accuracy and reliability of vision systems across diverse imaging scenarios. The code is available at https://github.com/Ai-Chen-Lab/MToIE.
Submitted 11 February, 2025; originally announced February 2025.
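The multi-receptive-field component described in the abstract above (parallel depthwise separable convolutions with different dilation rates) can be sketched roughly as follows; the channel width, the dilation rates (1, 2, 3), the activation, and the residual fusion are illustrative guesses, not the authors' configuration.

import torch
import torch.nn as nn

class DepthwiseSeparableConv(nn.Module):
    """3x3 depthwise convolution (with dilation) followed by a 1x1 pointwise mix."""
    def __init__(self, channels, dilation):
        super().__init__()
        self.depthwise = nn.Conv2d(channels, channels, kernel_size=3,
                                   padding=dilation, dilation=dilation,
                                   groups=channels, bias=False)
        self.pointwise = nn.Conv2d(channels, channels, kernel_size=1, bias=False)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.act(self.pointwise(self.depthwise(x)))

class MultiReceptiveFieldBlock(nn.Module):
    """Three parallel branches with different dilation rates, concatenated and fused."""
    def __init__(self, channels, dilations=(1, 2, 3)):
        super().__init__()
        self.branches = nn.ModuleList(
            DepthwiseSeparableConv(channels, d) for d in dilations)
        self.fuse = nn.Conv2d(len(dilations) * channels, channels, kernel_size=1)

    def forward(self, x):
        feats = torch.cat([branch(x) for branch in self.branches], dim=1)
        return self.fuse(feats) + x   # residual connection (an assumption here)

x = torch.randn(1, 32, 64, 64)
print(MultiReceptiveFieldBlock(32)(x).shape)   # torch.Size([1, 32, 64, 64])

Because the padding equals the dilation rate for a 3x3 kernel, each branch keeps the spatial size while seeing a progressively larger receptive field, which is what lets a cheap depthwise stack gather multi-scale context.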
7. arXiv:2502.06919 [pdf, other]  cs.LG; cs.AI; cs.RO
Select before Act: Spatially Decoupled Action Repetition for Continuous Control
Authors: Buqing Nie, Yangqing Fu, Yue Gao
Abstract: Reinforcement Learning (RL) has achieved remarkable success in various continuous control tasks, such as robot manipulation and locomotion. Different to mainstream RL which makes decisions at individual steps, recent studies have incorporated action repetition into RL, achieving enhanced action persistence with improved sample efficiency and superior performance. However, existing methods treat all action dimensions as a whole during repetition, ignoring variations among them. This constraint leads to inflexibility in decisions, which reduces policy agility and effectiveness. In this work, we propose a novel repetition framework called SDAR, which implements Spatially Decoupled Action Repetition by performing closed-loop act-or-repeat selection for each action dimension individually. SDAR achieves more flexible repetition strategies, leading to an improved balance between action persistence and diversity. Compared to existing repetition frameworks, SDAR is more sample efficient with higher policy performance and reduced action fluctuation. Experiments are conducted on various continuous control scenarios, demonstrating the effectiveness of the spatially decoupled repetition design proposed in this work.
Submitted 10 February, 2025; originally announced February 2025.
Comments: ICLR 2025
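To picture what "spatially decoupled" repetition means in the entry above, the toy loop below makes an act-or-repeat choice for every action dimension independently; the random repeat rule and the placeholder action source are stand-ins for illustration, not SDAR's learned selector or policy.

import numpy as np

rng = np.random.default_rng(0)
action_dim = 4
prev_action = np.zeros(action_dim)

def fresh_action():
    """Placeholder for the action the policy would compute from scratch."""
    return rng.uniform(-1.0, 1.0, size=action_dim)

def repeat_decision(prev):
    """Placeholder for a closed-loop selector: True means repeat that dimension."""
    return rng.random(action_dim) < 0.5

for t in range(5):
    proposal = fresh_action()
    repeat = repeat_decision(prev_action)
    # Per-dimension blend: repeated dimensions keep their previous value,
    # the remaining dimensions switch to the freshly computed one.
    action = np.where(repeat, prev_action, proposal)
    prev_action = action
    print(t, np.round(action, 2))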
8. arXiv:2502.05749 [pdf, other]  cs.CV; cs.AI; eess.SY
UniDB: A Unified Diffusion Bridge Framework via Stochastic Optimal Control
Authors: Kaizhen Zhu, Mokai Pan, Yuexin Ma, Yanwei Fu, Jingyi Yu, Jingya Wang, Ye Shi
Abstract: Recent advances in diffusion bridge models leverage Doob's $h$-transform to establish fixed endpoints between distributions, demonstrating promising results in image translation and restoration tasks. However, these approaches frequently produce blurred or excessively smoothed image details and lack a comprehensive theoretical foundation to explain these shortcomings. To address these limitations, we propose UniDB, a unified framework for diffusion bridges based on Stochastic Optimal Control (SOC). UniDB formulates the problem through an SOC-based optimization and derives a closed-form solution for the optimal controller, thereby unifying and generalizing existing diffusion bridge models. We demonstrate that existing diffusion bridges employing Doob's $h$-transform constitute a special case of our framework, emerging when the terminal penalty coefficient in the SOC cost function tends to infinity. By incorporating a tunable terminal penalty coefficient, UniDB achieves an optimal balance between control costs and terminal penalties, substantially improving detail preservation and output quality. Notably, UniDB seamlessly integrates with existing diffusion bridge models, requiring only minimal code modifications. Extensive experiments across diverse image restoration tasks validate the superiority and adaptability of the proposed framework. Our code is available at https://github.com/UniDB-SOC/UniDB/.
Submitted 11 February, 2025; v1 submitted 8 February, 2025; originally announced February 2025.
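To make the "terminal penalty" remark above concrete, a generic stochastic-optimal-control objective of the kind the abstract alludes to can be written schematically as below; the notation ($\gamma$ for the terminal penalty coefficient, $x^{\ast}$ for the desired endpoint, $f$, $g$ for drift and diffusion) is illustrative and not taken from the paper:

$$ \min_{u}\ \mathbb{E}\left[\int_{0}^{T}\tfrac{1}{2}\,\|u_t\|^{2}\,\mathrm{d}t+\tfrac{\gamma}{2}\,\|x_T-x^{\ast}\|^{2}\right], \qquad \mathrm{d}x_t=\bigl(f(x_t,t)+g(t)\,u_t\bigr)\,\mathrm{d}t+g(t)\,\mathrm{d}w_t . $$

In this schematic reading, letting $\gamma\to\infty$ pins $x_T$ to $x^{\ast}$ exactly, the hard endpoint constraint that a Doob $h$-transform bridge imposes, while a finite $\gamma$ trades terminal accuracy against control cost, which matches the tunable coefficient described in the abstract.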
9. arXiv:2502.05480 [pdf, other]  astro-ph.GA
Finding Quasars Behind the Galactic Plane: Spectroscopic Identifications of ~1300 New Quasars at |b| <= 20 degree from LAMOST DR10
Authors: Zhi-Ying Huo, Yuming Fu, Yang Huang, Haibo Yuan, Xue-Bing Wu, Maosheng Xiang, Xiao-Wei Liu, Bing Lyu, Hao Wu, Jian Li, Yanxia Zhang, Yanli Ai, Junjie Jin
Abstract: Quasars behind the Galactic plane (GPQs) are excellent tracers to probe the chemistry and kinematics of the interstellar/intergalactic medium (ISM/IGM) of the Milky Way along sight lines via absorption line spectroscopy. Moreover, the quasars located at low Galactic latitudes will fill the gap in the spatial distribution of known quasars near the Galactic plane, and can be used to construct an astrometric reference frame for accurate measurements of proper motions (PMs) of stars and substructures of the Milky Way. We have carried out a survey of background quasars in the low Galactic latitude region since the LAMOST phase II survey in 2017. Quasar candidates have been selected from the optical and infrared photometric data of the Pan-STARRS1 and WISE surveys based on their variability and color properties. In this paper, we present a sample of 1982 spectroscopically confirmed GPQs with |b| <= 20 degree based on LAMOST Data Release 10 (DR10). Among them, 1338 are newly discovered. Most GPQs are located around 240 < l < 90 degree, and the spatial distributions are non-uniform. These GPQs have a magnitude distribution with a peak at i-mag 19.0, and mostly around 18.0-19.5 mag. The peak of the redshift distribution is around ~1.5, and most GPQs have redshifts between 0.3 and 2.5. Our finding demonstrates the potential discovery space for GPQs from the spectroscopic surveys and the promising applications for future research.
Submitted 8 February, 2025; originally announced February 2025.
Comments: 15 pages, 7 figures and 2 tables, accepted for publication in ApJS

10. arXiv:2502.03964 [pdf, other]  cs.HC; cs.CR
"It Warned Me Just at the Right Moment": Exploring LLM-based Real-time Detection of Phone Scams
Authors: Zitong Shen, Sineng Yan, Youqian Zhang, Xiapu Luo, Grace Ngai, Eugene Yujun Fu
Abstract: Despite living in the era of the internet, phone-based scams remain one of the most prevalent forms of scams. These scams aim to exploit victims for financial gain, causing both monetary losses and psychological distress. While governments, industries, and academia have actively introduced various countermeasures, scammers also continue to evolve their tactics, making phone scams a persistent threat. To combat these increasingly sophisticated scams, detection technologies must also advance. In this work, we propose a framework for modeling scam calls and introduce an LLM-based real-time detection approach, which assesses fraudulent intent in conversations and provides immediate warnings to users to mitigate harm. Through experiments, we evaluate the method's performance and analyze key factors influencing its effectiveness. This analysis enables us to refine the method to improve precision while exploring the trade-off between recall and timeliness, paving the way for future directions in this critical area of research.
Submitted 6 February, 2025; originally announced February 2025.
Comments: 8 pages, 4 figures
ACM Class: H.5
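The real-time aspect of the detection approach above can be pictured as a loop that re-scores an ongoing call transcript after every utterance and warns as soon as a risk threshold is crossed. In the sketch below, score_fraud_intent is a stand-in for an LLM judgment; the keyword heuristic, threshold, and example transcript are invented for illustration and are not the paper's method or data.

from typing import Iterable, List

SUSPICIOUS = ("gift card", "wire transfer", "verification code", "arrest warrant")

def score_fraud_intent(history: List[str]) -> float:
    """Stand-in for an LLM rating of fraudulent intent so far, in [0, 1]."""
    text = " ".join(history).lower()
    hits = sum(phrase in text for phrase in SUSPICIOUS)
    return min(1.0, 0.3 * hits)

def monitor_call(utterances: Iterable[str], threshold: float = 0.5) -> None:
    """Re-assess the conversation after every turn; warn once risk is high."""
    history: List[str] = []
    for turn, utterance in enumerate(utterances, start=1):
        history.append(utterance)
        risk = score_fraud_intent(history)
        if risk >= threshold:
            print(f"turn {turn}: warning, possible scam (risk={risk:.1f})")
            return                      # warn as early as possible
    print("call ended with no warning")

monitor_call([
    "Hello, this is your bank's security team.",
    "We detected unusual activity on your account.",
    "Please read me the verification code and buy a gift card to secure it.",
])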
arXiv:2502.03828 [pdf, ps, other]
Categories: hep-ex (High Energy Physics - Experiment)
Title: Observation of $D^+\to \bar K_1(1270)^0\mu^+\nu_\mu$ and $D^0\to K_1(1270)^-\mu^+\nu_\mu$
Authors: BESIII Collaboration: M. Ablikim, M. N. Achasov, P. Adlarson, O. Afedulidis, X. C. Ai, R. Aliberti, A. Amoroso, Q. An, Y. Bai, O. Bakina, I. Balossino, Y. Ban, H. -R. Bao, V. Batozskaya, K. Begzsuren, N. Berger, M. Berlowski, M. Bertani, D. Bettoni, F. Bianchi, E. Bianco, A. Bortone, I. Boyko, R. A. Briere, et al. (646 additional authors not shown)
Abstract: By analyzing 7.93 $\rm fb^{-1}$ of $e^+e^-$ collision data collected at the center-of-mass energy of 3.773 GeV with the BESIII detector operated at the BEPCII collider, we report the observation of the semimuonic decays $D^+\to \bar K_1(1270)^0\mu^+\nu_\mu$ and $D^0\to K_1(1270)^-\mu^+\nu_\mu$ with statistical significances of $12.5\sigma$ and $6.0\sigma$, respectively. Their decay branching fractions are determined to be ${\mathcal B}[D^{+}\to \bar{K}_1(1270)^0 \mu^{+}\nu_\mu]=(2.36\pm0.20^{+0.18}_{-0.27}\pm 0.48)\times10^{-3}$ and ${\mathcal B}[D^{0}\to K_1(1270)^{-} \mu^{+}\nu_\mu]=(0.78\pm0.11^{+0.05}_{-0.09}\pm 0.15)\times10^{-3}$, where the first and second uncertainties are statistical and systematic, respectively, and the third originates from the input branching fraction of $\bar K_{1}(1270)^0\to K^- \pi^+\pi^0$ or $K_1(1270)^-\to K^-\pi^+\pi^-$. Combining our branching fractions with the previous measurements of ${\mathcal B}[D^+\to \bar K_1(1270)^0e^+\nu_{e}]$ and ${\mathcal B}[D^0\to K_1(1270)^-e^+\nu_{e}]$, we determine the branching fraction ratios to be ${\mathcal B}[D^+\to \bar K_1(1270)^0\mu^+\nu_\mu]/{\mathcal B}[D^+\to \bar K_1(1270)^0e^+\nu_{e}]=1.03 \pm 0.14 \substack{+0.11\\-0.15}$ and ${\mathcal B}[D^0\to K_1(1270)^-\mu^+\nu_\mu]/{\mathcal B}[D^0\to K_1(1270)^-e^+\nu_{e}]=0.74\pm 0.13 \substack{+0.08\\-0.13}$. Using the branching fractions measured in this work and the world-average lifetimes of the $D^+$ and $D^0$ mesons, we determine the semimuonic partial decay width ratio to be $\Gamma[D^+\to \bar K_1(1270)^0 \mu^+\nu_\mu]/\Gamma[D^0\to K_1(1270)^- \mu^+\nu_\mu]=1.22\pm 0.10\substack{+0.06\\-0.09}$, which is consistent with unity as predicted by isospin conservation.
Submitted 6 February, 2025; originally announced February 2025.
Comments: 10 pages, 2 figures
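The partial-width ratio quoted at the end of this abstract follows from $\Gamma = \mathcal{B}/\tau$. A quick numerical check of the central values; the $D^+$ and $D^0$ lifetimes used below (roughly 1.033 ps and 0.410 ps) are approximate world-average numbers inserted here as assumptions, and uncertainties are ignored:

```python
# Branching fractions from the abstract (central values only)
B_Dp = 2.36e-3   # B[D+ -> K1bar(1270)^0 mu+ nu]
B_D0 = 0.78e-3   # B[D0 -> K1(1270)^-  mu+ nu]

# Approximate world-average lifetimes in picoseconds (assumed inputs)
tau_Dp = 1.033
tau_D0 = 0.410

# Gamma = B / tau, so the ratio of partial widths is
ratio = (B_Dp / tau_Dp) / (B_D0 / tau_D0)
print(f"Gamma(D+)/Gamma(D0) ~ {ratio:.2f}")   # ~1.2, consistent with the quoted 1.22
```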
arXiv:2501.17802 [pdf, other]
Categories: cs.LG (Machine Learning)
Title: LEKA: LLM-Enhanced Knowledge Augmentation
Authors: Xinhao Zhang, Jinghan Zhang, Fengran Mo, Dongjie Wang, Yanjie Fu, Kunpeng Liu
Abstract: Humans excel in analogical learning and knowledge transfer and, more importantly, possess a unique understanding of identifying appropriate sources of knowledge. From a model's perspective, this presents an interesting challenge. If models could autonomously retrieve knowledge useful for transfer or decision-making to solve problems, they would transition from passively acquiring to actively accessing and learning from knowledge. However, filling models with knowledge is relatively straightforward -- it simply requires more training and accessible knowledge bases. The more complex task is teaching models which knowledge can be analogized and transferred. Therefore, we design a knowledge augmentation method, LEKA, for knowledge transfer that actively searches for suitable knowledge sources to enrich the target domain's knowledge. LEKA extracts key information from the target domain's textual data, retrieves pertinent data from external data libraries, and harmonizes the retrieved data with the target domain data in feature space and marginal probability measures. We validate the effectiveness of our approach through extensive experiments across various domains and demonstrate significant improvements over traditional methods in reducing computational costs, automating data alignment, and optimizing transfer learning outcomes.
Submitted 29 January, 2025; originally announced January 2025.

arXiv:2501.15447 [pdf, ps, other]
Categories: hep-ex (High Energy Physics - Experiment)
Title: Observation of $h_{c}$ radiative decays to multiple light hadrons and the tensor state $f_2(1270)$
Authors: BESIII Collaboration: M. Ablikim, M. N. Achasov, P. Adlarson, X. C. Ai, R. Aliberti, A. Amoroso, Q. An, Y. Bai, O. Bakina, Y. Ban, H. -R. Bao, V. Batozskaya, K. Begzsuren, N. Berger, M. Berlowski, M. Bertani, D. Bettoni, F. Bianchi, E. Bianco, A. Bortone, I. Boyko, R. A. Briere, A. Brueggemann, H. Cai, et al. (666 additional authors not shown)
Abstract: Using $\psi(3686)\rightarrow \pi^{0} h_{c}$ decays from a data sample of $(27.12\pm0.14)\times10^{8}$ $\psi(3686)$ events collected by the BESIII detector at the BEPCII collider, $h_c$ radiative decays to $\gamma\pi^{+}\pi^{-}$, $\gamma\pi^{+}\pi^{-}\eta$, $\gamma 2(\pi^{+}\pi^{-})$, and $\gamma p\bar{p}$ are observed for the first time, each with a significance greater than $5\sigma$. The corresponding branching fractions are measured. Furthermore, intermediate states below 2.8 GeV/$c^{2}$ are investigated, leading to the first observation of the decay process $h_c\rightarrow\gamma f_{2}(1270)\rightarrow\gamma\pi^{+}\pi^{-}$ with a significance of $5.5\,\sigma$. This observation represents the first instance of $h_c$ radiative decay to a tensor state.
Submitted 26 January, 2025; originally announced January 2025.

arXiv:2501.15276 [pdf]
Categories: cs.HC (Human-Computer Interaction); cs.AI (Artificial Intelligence)
Title: Exploring the Collaborative Co-Creation Process with AI: A Case Study in Novice Music Production
Authors: Yue Fu, Michele Newman, Lewis Going, Qiuzi Feng, Jin Ha Lee
Abstract: Artificial intelligence is reshaping creative domains, yet its co-creative processes, especially in group settings with novice users, remain underexplored. To bridge this gap, we conducted a case study in a college-level course where nine undergraduate students were tasked with creating three original music tracks using AI tools over 10 weeks. The study spanned the entire creative journey, from ideation to releasing these songs on Spotify. Participants leveraged AI for music and lyric production, cover art, and distribution. Our findings highlight how AI transforms creative workflows: accelerating ideation but compressing the traditional preparation stage, and requiring novices to navigate a challenging idea selection and validation phase. We also identified a new "collaging and refinement" stage, where participants creatively combined diverse AI-generated outputs into cohesive works. Furthermore, AI influenced group social dynamics and role division among human creators. Based on these insights, we propose the Human-AI Co-Creation Stage Model and the Human-AI Agency Model, offering new perspectives on collaborative co-creation with AI.
Submitted 25 January, 2025; originally announced January 2025.

arXiv:2501.15254 [pdf, ps, other]
Categories: astro-ph.GA (Astrophysics of Galaxies); astro-ph.HE (High Energy Astrophysical Phenomena)
Title: Highly Variable Quasar Candidates Selected from 4XMM-DR13 with Machine Learning
Authors: Heng Wang, Yanli Ai, Yanxia Zhang, Yuming Fu, Wenfeng Wen, Liming Dou, Xue-Bing Wu, Xiangru Li, Zhiying Huo
Abstract: We present a sample of 12 quasar candidates with highly variable soft X-ray emission from the 4th XMM-Newton Serendipitous Source Catalog (4XMM-DR13), selected using a random forest. We obtained optical to mid-IR photometric data for the 4XMM-DR13 sources by correlating the sample with the SDSS DR18 photometric database and the AllWISE database. By cross-matching this sample with known spectral catalogs from the SDSS and LAMOST surveys, we obtained a training data set containing stars, galaxies, and quasars. The random forest algorithm was trained to classify the XMM-WISE-SDSS sample. We further filtered the classified quasar candidates with $\it{Gaia}$ proper motion to remove stellar contaminants. Finally, 53,992 quasar candidates were classified, with 10,210 known quasars matched in SIMBAD. The quasar candidates have systematically lower X-ray fluxes than the quasars in the training set, which indicates that the classifier is helpful for singling out fainter quasars. We constructed a sample of 12 sources from these quasar candidates whose soft X-ray fluxes changed by a factor of 10 over $\sim$20 years in the 4XMM-Newton survey. Our selected highly variable quasar candidates extend the quasar sample, characterized by extreme soft X-ray variability, to the optically faint end with magnitudes around $r \sim 22$. None of the 12 sources were detected in ROSAT observations. Given the flux limit of ROSAT, this result suggests that quasars exhibiting variations of more than two orders of magnitude are extremely rare.
Submitted 25 January, 2025; originally announced January 2025.
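The selection step described above is a standard multi-class random forest. A minimal sketch with placeholder photometric features and labels, not the authors' actual feature set or data:

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)

# Placeholder feature matrix standing in for X-ray, optical and mid-IR
# photometry (e.g. X-ray flux, SDSS colours, WISE colours); labels are
# 0 = star, 1 = galaxy, 2 = quasar.
X = rng.normal(size=(3000, 8))
y = rng.integers(0, 3, size=3000)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)

clf = RandomForestClassifier(n_estimators=300, random_state=0)
clf.fit(X_tr, y_tr)

proba = clf.predict_proba(X_te)      # per-class probabilities
quasar_like = proba[:, 2] > 0.9      # keep only confident quasar candidates
print("test accuracy:", clf.score(X_te, y_te))
print("candidates kept:", quasar_like.sum())
# A further cut on Gaia proper motion would then remove stellar contaminants.
```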
arXiv:2501.14889 [pdf, other]
Categories: cs.LG (Machine Learning)
Title: Iterative Feature Space Optimization through Incremental Adaptive Evaluation
Authors: Yanping Wu, Yanyong Huang, Zhengzhang Chen, Zijun Yao, Yanjie Fu, Kunpeng Liu, Xiao Luo, Dongjie Wang
Abstract: Iterative feature space optimization involves systematically evaluating and adjusting the feature space to improve downstream task performance. However, existing works suffer from three key limitations: 1) overlooking differences among data samples leads to evaluation bias; 2) tailoring feature spaces to specific machine learning models results in overfitting and poor generalization; 3) requiring the evaluator to be retrained from scratch during each optimization iteration significantly reduces the overall efficiency of the optimization process. To bridge these gaps, we propose a gEneralized Adaptive feature Space Evaluator (EASE) to efficiently produce optimal and generalized feature spaces. This framework consists of two key components: a Feature-Sample Subspace Generator and a Contextual Attention Evaluator. The first component aims to decouple the information distribution within the feature space to mitigate evaluation bias. To achieve this, we first identify the features most relevant to prediction tasks and the samples most challenging for evaluation based on feedback from the subsequent evaluator. This decoupling strategy makes the evaluator consistently target the most challenging aspects of the feature space. The second component incrementally captures evolving patterns of the feature space for efficient evaluation. We propose a weighted-sharing multi-head attention mechanism to encode key characteristics of the feature space into an embedding vector for evaluation. Moreover, the evaluator is updated incrementally, retaining prior evaluation knowledge while incorporating new insights, as consecutive feature spaces during the optimization process share partial information. Extensive experiments on fourteen real-world datasets demonstrate the effectiveness of the proposed framework. Our code and data are publicly available.
Submitted 24 January, 2025; originally announced January 2025.
Comments: 18 pages
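As a rough illustration of the second component, the sketch below scores a candidate feature space by embedding each feature as a token, pooling with standard multi-head attention, and reusing the same evaluator and optimizer across optimization iterations instead of retraining from scratch. It is a toy stand-in under those assumptions, not the paper's weighted-sharing attention or its training protocol:

```python
import torch
import torch.nn as nn

class FeatureSpaceEvaluator(nn.Module):
    """Toy attention-based feature-space evaluator: each feature column is
    summarized as a token, attention pools the tokens into one embedding,
    and a linear head predicts a utility score."""
    def __init__(self, n_summary: int = 16, d_model: int = 32, n_heads: int = 4):
        super().__init__()
        self.proj = nn.Linear(n_summary, d_model)   # per-feature summary -> token
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.head = nn.Linear(d_model, 1)

    def forward(self, feature_tokens: torch.Tensor) -> torch.Tensor:
        # feature_tokens: (batch, n_features, n_summary)
        tok = self.proj(feature_tokens)
        attended, _ = self.attn(tok, tok, tok)
        emb = attended.mean(dim=1)                  # pooled embedding of the feature space
        return self.head(emb).squeeze(-1)

evaluator = FeatureSpaceEvaluator()
opt = torch.optim.Adam(evaluator.parameters(), lr=1e-3)

# Incremental updates: the same evaluator and optimizer are reused across
# iterations rather than being re-initialized each time.
for step in range(3):
    feats = torch.randn(8, 10, 16)   # 8 candidate feature spaces, 10 features each
    target = torch.rand(8)           # e.g. observed downstream performance
    loss = nn.functional.mse_loss(evaluator(feats), target)
    opt.zero_grad(); loss.backward(); opt.step()
    print(f"iteration {step}: loss={loss.item():.3f}")
```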
arXiv:2501.14367 [pdf, other]
Categories: cs.NI (Networking and Internet Architecture); eess.SP (Signal Processing)
Title: Joint System Latency and Data Freshness Optimization for Cache-enabled Mobile Crowdsensing Networks
Authors: Kexin Shi, Yaru Fu, Yongna Guo, Fu Lee Wang, Yan Zhang
Abstract: Mobile crowdsensing (MCS) networks enable large-scale data collection by leveraging the ubiquity of mobile devices. However, frequent sensing and data transmission can lead to significant resource consumption. To mitigate this issue, edge caching has been proposed as a solution for storing recently collected data. Nonetheless, this approach may compromise data freshness. In this paper, we investigate the trade-off between reusing cached task results and re-sensing tasks in cache-enabled MCS networks, aiming to minimize system latency while maintaining information freshness. To this end, we formulate a weighted delay and age of information (AoI) minimization problem, jointly optimizing sensing decisions, user selection, channel selection, task allocation, and caching strategies. The problem is a mixed-integer non-convex program and is intractable in general. We therefore decompose the long-term problem into sequential one-shot sub-problems and design a framework that optimizes the system latency, task sensing decision, and caching strategy subproblems. When a task is re-sensed, the one-shot problem reduces to the system latency minimization problem, which can be solved optimally. The task sensing decision is then made by comparing the system latency and the AoI. Additionally, a Bayesian update strategy is developed to manage the cached task results. Building upon this framework, we propose a lightweight and time-efficient algorithm that makes real-time decisions for the long-term optimization problem. Extensive simulation results validate the effectiveness of our approach.
Submitted 24 January, 2025; originally announced January 2025.
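The re-sense-versus-reuse decision at the heart of this formulation can be caricatured as comparing a weighted delay-plus-AoI cost for each option. The weights and numbers below are made up for illustration and do not reproduce the paper's optimization:

```python
def serve_task(latency_resense: float, latency_cache: float,
               aoi_cache: float, w_delay: float = 1.0, w_aoi: float = 0.5):
    """Toy decision rule: pick whichever option has the lower weighted
    delay + age-of-information cost. Weights are illustrative only."""
    cost_resense = w_delay * latency_resense                 # fresh data, AoI ~ 0
    cost_cache = w_delay * latency_cache + w_aoi * aoi_cache  # fast but stale
    if cost_resense <= cost_cache:
        return "re-sense", cost_resense
    return "reuse cache", cost_cache

print(serve_task(latency_resense=2.0, latency_cache=0.3, aoi_cache=5.0))
# -> ('re-sense', 2.0): once the cached copy is stale enough, re-sensing wins
```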
arXiv:2501.14206 [pdf, ps, other]
Categories: hep-ex (High Energy Physics - Experiment)
Title: Cross section measurement of $e^{+}e^{-} \to f_{1}(1285)\pi^{+}\pi^{-}$ at center-of-mass energies between $3.808$ and $4.951~\rm GeV$
Authors: BESIII Collaboration: M. Ablikim, M. N. Achasov, P. Adlarson, O. Afedulidis, X. C. Ai, R. Aliberti, A. Amoroso, Q. An, Y. Bai, O. Bakina, I. Balossino, Y. Ban, H. -R. Bao, V. Batozskaya, K. Begzsuren, N. Berger, M. Berlowski, M. Bertani, D. Bettoni, F. Bianchi, E. Bianco, A. Bortone, I. Boyko, R. A. Briere, et al. (639 additional authors not shown)
Abstract: Using data samples collected by the BESIII detector located at the Beijing Electron Positron Collider, the cross sections of the process $e^+e^-\to f_{1}(1285)\pi^+\pi^-$ are measured at forty-five center-of-mass energies from $3.808$ to $4.951~{\rm GeV}$. An investigation of the cross-section line shape is performed, and no significant structure is observed.
Submitted 23 January, 2025; originally announced January 2025.

arXiv:2501.12665 [pdf, other]
Categories: astro-ph.GA (Astrophysics of Galaxies)
Title: A Pilot Study for the CSST Slitless Spectroscopic Quasar Survey Based on Mock Data
Authors: Yuxuan Pang, Xue-Bing Wu, Yuming Fu, Rui Zhu, Bing Lyu, Huimei Wang, Xiaotong Feng
Abstract: The wide survey of the Chinese Space Station Telescope (CSST) will observe a large field of 17,500 $\text{deg}^2$. The GU, GV, and GI grism observations of CSST will cover a wavelength range from 2550 to 10000 Å at a resolution of $R\sim 200$ and a depth of about 22 AB magnitude for the continuum. In this paper, we present a pipeline to identify quasars and measure their physical properties with the CSST mock data. We simulate the raw images and extract the one-dimensional grism spectra for quasars, galaxies, and stars with r-band magnitudes of $18<\text{m}_{\text{r}}<22$ using the CSST Cycle 6 simulation code. Using a convolutional neural network, we separate quasars from stars and galaxies. We measure the redshifts by identifying the strong emission lines of quasars. We also fit the 1D slitless spectra with QSOFITMORE to estimate the black hole masses and Eddington ratios. Our results show that the CSST slitless spectroscopy can effectively separate quasars with redshifts $z=0-5$ from other types of objects with an accuracy of 99%. Among the successfully classified quasars, 90% have precise redshift measurements with $\sigma_{\mathrm{NMAD}}=0.002$. The scatters of the black hole masses and Eddington ratios from the spectral fittings are 0.13 and 0.15 dex, respectively. The metallicity diagnostic line ratios have a scatter of 0.1-0.2 dex. Our results show that the CSST slitless spectroscopy survey has the potential to discover about 0.9 million new quasars and to provide important contributions to AGN science and cosmology.
Submitted 22 January, 2025; originally announced January 2025.
Comments: 19 pages, 15 figures; Accepted for publication in The Astrophysical Journal (ApJ)
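The redshift step in the pipeline above comes down to $z = \lambda_{\rm obs}/\lambda_{\rm rest} - 1$ for an identified emission line. A tiny sketch; the Mg II rest wavelength is a standard value, while the observed wavelength is invented for the example:

```python
def redshift_from_line(lambda_obs: float, lambda_rest: float) -> float:
    """z = lambda_obs / lambda_rest - 1 for an identified emission line."""
    return lambda_obs / lambda_rest - 1.0

# e.g. Mg II 2798 Angstrom observed at 8394 Angstrom in a slitless spectrum
print(round(redshift_from_line(8394.0, 2798.0), 3))   # -> 2.0
```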
arXiv:2501.11236 [pdf, other]
Categories: cs.CV (Computer Vision and Pattern Recognition); cs.LG (Machine Learning)
Title: A New Formulation of Lipschitz Constrained With Functional Gradient Learning for GANs
Authors: Chang Wan, Ke Fan, Xinwei Sun, Yanwei Fu, Minglu Li, Yunliang Jiang, Zhonglong Zheng
Abstract: This paper introduces a promising alternative method for training Generative Adversarial Networks (GANs) on large-scale datasets with clear theoretical guarantees. GANs are typically learned through a minimax game between a generator and a discriminator, which is known to be empirically unstable. Previous learning paradigms have encountered mode collapse issues without a theoretical solution. To address these challenges, we propose a novel Lipschitz-constrained Functional Gradient GANs learning (Li-CFG) method to stabilize the training of GANs and provide a theoretical foundation for effectively increasing the diversity of synthetic samples by reducing the neighborhood size of the latent vector. Specifically, we demonstrate that the neighborhood size of the latent vector can be reduced by increasing the norm of the discriminator gradient, resulting in enhanced diversity of synthetic samples. To efficiently enlarge the norm of the discriminator gradient, we introduce a novel ε-centered gradient penalty that amplifies the norm of the discriminator gradient using the hyper-parameter ε. In comparison to other constraints, our method enlarges the discriminator norm, thus obtaining the smallest neighborhood size of the latent vector. Extensive experiments on benchmark datasets for image generation demonstrate the efficacy of the Li-CFG method and the ε-centered gradient penalty. The results showcase improved stability and increased diversity of synthetic samples.
Submitted 19 January, 2025; originally announced January 2025.
Journal ref: Machine Learning, 2024
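The abstract does not spell out the penalty, but a gradient-norm penalty "centered" at a hyper-parameter ε has the generic WGAN-GP-like shape $(\|\nabla_x D(x)\| - \epsilon)^2$. The sketch below only shows how such a term is computed with autograd and should be read as an illustrative guess at that shape, not the paper's exact loss:

```python
import torch
import torch.nn as nn

def eps_centered_gradient_penalty(disc: nn.Module, x: torch.Tensor,
                                  eps: float = 1.0) -> torch.Tensor:
    """Generic penalty of the form (||grad_x D(x)|| - eps)^2, i.e. a
    WGAN-GP-style term centered at eps instead of 1 (illustrative only)."""
    x = x.clone().requires_grad_(True)
    out = disc(x)
    grad = torch.autograd.grad(outputs=out.sum(), inputs=x, create_graph=True)[0]
    grad_norm = grad.flatten(start_dim=1).norm(2, dim=1)
    return ((grad_norm - eps) ** 2).mean()

# Usage inside a discriminator update step, with a toy discriminator.
D = nn.Sequential(nn.Linear(10, 64), nn.ReLU(), nn.Linear(64, 1))
x_real = torch.randn(32, 10)
penalty = eps_centered_gradient_penalty(D, x_real, eps=2.0)
print(penalty.item())
```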
arXiv:2501.10555 [pdf, other]
Categories: cs.LG (Machine Learning); cs.AI (Artificial Intelligence)
Title: Towards Data-Centric AI: A Comprehensive Survey of Traditional, Reinforcement, and Generative Approaches for Tabular Data Transformation
Authors: Dongjie Wang, Yanyong Huang, Wangyang Ying, Haoyue Bai, Nanxu Gong, Xinyuan Wang, Sixun Dong, Tao Zhe, Kunpeng Liu, Meng Xiao, Pengfei Wang, Pengyang Wang, Hui Xiong, Yanjie Fu
Abstract: Tabular data is one of the most widely used formats across industries, driving critical applications in areas such as finance, healthcare, and marketing. In the era of data-centric AI, improving data quality and representation has become essential for enhancing model performance, particularly in applications centered around tabular data. This survey examines the key aspects of tabular data-centric AI, emphasizing feature selection and feature generation as essential techniques for data space refinement. We provide a systematic review of feature selection methods, which identify and retain the most relevant data attributes, and feature generation approaches, which create new features to simplify the capture of complex data patterns. This survey offers a comprehensive overview of current methodologies through an analysis of recent advancements, practical applications, and the strengths and limitations of these techniques. Finally, we outline open challenges and suggest future perspectives to inspire continued innovation in this field.
Submitted 17 January, 2025; originally announced January 2025.
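The two operations this survey is organized around, feature selection and feature generation, are easy to make concrete with generic tooling. The snippet below is a plain illustration of the two ideas, not any specific method covered by the survey:

```python
from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.preprocessing import PolynomialFeatures

X, y = load_breast_cancer(return_X_y=True)

# Feature selection: keep the 10 attributes most informative about the label.
selector = SelectKBest(mutual_info_classif, k=10)
X_sel = selector.fit_transform(X, y)

# Feature generation: derive new interaction features from the retained ones.
gen = PolynomialFeatures(degree=2, interaction_only=True, include_bias=False)
X_new = gen.fit_transform(X_sel)

print(X.shape, "->", X_sel.shape, "->", X_new.shape)  # (569, 30) -> (569, 10) -> (569, 55)
```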
arXiv:2501.10396 [pdf, other]
Categories: eess.SY (Systems and Control); cs.AI (Artificial Intelligence); cs.CY (Computers and Society); cs.NI (Networking and Internet Architecture)
Title: AI-Powered Urban Transportation Digital Twin: Methods and Applications
Authors: Xuan Di, Yongjie Fu, Mehmet K. Turkcan, Mahshid Ghasemi, Zhaobin Mo, Chengbo Zang, Abhishek Adhikari, Zoran Kostic, Gil Zussman
Abstract: We present a survey paper on methods and applications of digital twins (DT) for urban traffic management. While the majority of studies on the DT focus on its "eyes," that is, emerging sensing and perception capabilities such as object detection and tracking, what really distinguishes the DT from a traditional simulator lies in its "brain": the prediction and decision-making capabilities that extract patterns and make informed decisions from what has been seen and perceived. In order to add value to urban transportation management, DTs need to be powered by artificial intelligence and complemented with low-latency, high-bandwidth sensing and networking technologies. We first review the DT pipeline leveraging cyber-physical systems and propose our DT architecture deployed on a real-world testbed in New York City. This survey paper can serve as a pointer to help researchers and practitioners identify challenges and opportunities for the development of DTs; a bridge to initiate conversations across disciplines; and a road map to exploiting the potential of DTs for diverse urban transportation applications.
Submitted 29 December, 2024; originally announced January 2025.

arXiv:2501.10130 [pdf, other]
Categories: hep-ex (High Energy Physics - Experiment)
Title: Study of $\eta\rightarrow\pi^+\pi^-l^+l^-$
Authors: BESIII Collaboration: M. Ablikim, M. N. Achasov, P. Adlarson, O. Afedulidis, X. C. Ai, R. Aliberti, A. Amoroso, Q. An, Y. Bai, O. Bakina, I. Balossino, Y. Ban, H. -R. Bao, V. Batozskaya, K. Begzsuren, N. Berger, M. Berlowski, M. Bertani, D. Bettoni, F. Bianchi, E. Bianco, A. Bortone, I. Boyko, R. A. Briere, et al. (637 additional authors not shown)
Abstract: Using a sample of $(10087\pm44)\times10^{6}$ $J/\psi$ events accumulated with the BESIII detector, we analyze the decays $\eta\rightarrow\pi^+\pi^-l^+l^-$ ($l=e$ or $\mu$) via the process $J/\psi\rightarrow\gamma\eta$. The branching fraction of $\eta\rightarrow\pi^+\pi^-e^+e^-$ is measured to be $\mathcal{B}(\eta\rightarrow\pi^+\pi^-e^+e^-)=(3.07\pm0.12_{\rm{stat.}}\pm0.19_{\rm{syst.}})\times10^{-4}$. No signal events are observed for the $\eta\rightarrow\pi^{+}\pi^{-}\mu^{+}\mu^{-}$ decay, leading to an upper limit on the branching fraction of $\mathcal{B}(\eta\rightarrow\pi^{+}\pi^{-}\mu^{+}\mu^{-})<4.0\times10^{-7}$ at the 90% confidence level. Furthermore, the $CP$-violation asymmetry parameter is found to be $\mathcal{A}_{CP}(\eta\rightarrow\pi^{+}\pi^{-}e^{+}e^{-})=(-4.04\pm4.69_{\rm{stat.}}\pm0.14_{\rm{syst.}})\%$, showing no evidence of $CP$ violation with the current statistics. Additionally, we extract the transition form factor from the decay amplitude of $\eta\rightarrow\pi^+\pi^-e^+e^-$. Finally, axion-like particles are searched for via the decay $\eta\rightarrow\pi^+\pi^-a$, $a\rightarrow e^+e^-$, and upper limits on this branching fraction relative to that of $\eta\rightarrow\pi^+\pi^-e^+e^-$ are presented as a function of the axion-like particle mass in the range $5-200\ \mathrm{MeV}/c^{2}$.
Submitted 17 January, 2025; originally announced January 2025.

arXiv:2501.09619 [pdf, other]
Categories: cond-mat.str-el (Strongly Correlated Electrons)
Title: Berezinskii-Kosterlitz-Thouless region and magnetization plateaus in easy-axis triangular weak-dimer antiferromagnet K$_2$Co$_2$(SeO$_3$)$_3$
Authors: Ying Fu, Han Ge, Jian Chen, Jie Xiao, Yi Tan, Le Wang, Junfeng Wang, Chao Dong, Zhe Qu, Miao He, Chuanying Xi, Langsheng Ling, Bin Xi, Jia-Wei Mei
Abstract: We investigate the magnetic phase diagram of the bilayer triangular antiferromagnet K$_2$Co$_2$(SeO$_3$)$_3$, revealing a rich interplay among geometric frustration, bilayer coupling, and symmetry-driven phenomena. High-field magnetization measurements show fractional magnetization plateaus at 1/3, 1/2, 2/3, and 5/6 of the saturation magnetization. To elucidate the experimental magnetic phase diagram at low fields, we propose that K$_2$Co$_2$(SeO$_3$)$_3$ can be described as an easy-axis triangular weak-dimer antiferromagnet. We emphasize the critical role of the emergent $U(1) \otimes S_3$ symmetry, where $S_3 = \mathbb{Z}_3 \otimes \mathbb{Z}_2^d$, in determining the magnetic phases at low fields. The remarkable agreement between the experimental and theoretical phase diagrams suggests that the phase transitions are governed by this symmetry. Notably, our combined experimental and theoretical results identify a Berezinskii-Kosterlitz-Thouless (BKT) phase region at finite fields. These findings provide new insights into the phase structure of frustrated magnets and establish K$_2$Co$_2$(SeO$_3$)$_3$ as a compelling platform for exploring unconventional quantum phenomena in $U(1) \otimes S_3$ systems.
Submitted 23 January, 2025; v1 submitted 16 January, 2025; originally announced January 2025.
Comments: 6 pages, 4 figures. Supplementary material is included in source file. Typos fixed

arXiv:2501.09350 [pdf, other]
Categories: cs.CV (Computer Vision and Pattern Recognition)
Title: Making Your Dreams A Reality: Decoding the Dreams into a Coherent Video Story from fMRI Signals
Authors: Yanwei Fu, Jianxiong Gao, Baofeng Yang, Jianfeng Feng
Abstract: This paper studies a brave new idea for the Multimedia community and proposes a novel framework to convert dreams into coherent video narratives using fMRI data. Dreams have intrigued humanity for centuries, offering glimpses into our subconscious minds. Recent advancements in brain imaging, particularly functional magnetic resonance imaging (fMRI), have provided new ways to explore the neural basis of dreaming. By combining subjective dream experiences with objective neurophysiological data, we aim to understand the visual aspects of dreams and create complete video narratives. Our process involves three main steps: reconstructing visual perception, decoding dream imagery, and integrating dream stories. Using innovative techniques in fMRI analysis and language modeling, we seek to push the boundaries of dream research and gain deeper insights into visual experiences during sleep. This technical report introduces a novel approach to visually decoding dreams using fMRI signals and weaving dream visuals into narratives using language models. We gather a dataset of dreams along with descriptions to assess the effectiveness of our framework.
Submitted 16 January, 2025; originally announced January 2025.
Comments: Work in progress

arXiv:2501.09341 [pdf, other]
Categories: cs.CV (Computer Vision and Pattern Recognition)
Title: SE-BSFV: Online Subspace Learning based Shadow Enhancement and Background Suppression for ViSAR under Complex Background
Authors: Shangqu Yan, Chenyang Luo, Yaowen Fu, Wenpeng Zhang, Wei Yang, Ruofeng Yu
Abstract: Video synthetic aperture radar (ViSAR) has attracted substantial attention in the moving target detection (MTD) field due to its ability to continuously monitor changes in the target area. In ViSAR, the moving targets' shadows will not offset and defocus, which is widely used as a feature for MTD. …
However, the shadows are difficult to distinguish from the low scattering region in the background, w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.09341v1-abstract-full').style.display = 'inline'; document.getElementById('2501.09341v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.09341v1-abstract-full" style="display: none;"> Video synthetic aperture radar (ViSAR) has attracted substantial attention in the moving target detection (MTD) field due to its ability to continuously monitor changes in the target area. In ViSAR, the moving targets&#39; shadows will not offset and defocus, which is widely used as a feature for MTD. However, the shadows are difficult to distinguish from the low scattering region in the background, which will cause more missing and false alarms. Therefore, it is worth investigating how to enhance the distinction between the shadows and background. In this study, we proposed the Shadow Enhancement and Background Suppression for ViSAR (SE-BSFV) algorithm. The SE-BSFV algorithm is based on the low-rank representation (LRR) theory and adopts online subspace learning technique to enhance shadows and suppress background for ViSAR images. Firstly, we use a registration algorithm to register the ViSAR images and utilize Gaussian mixture distribution (GMD) to model the ViSAR data. Secondly, the knowledge learned from the previous frames is leveraged to estimate the GMD parameters of the current frame, and the Expectation-maximization (EM) algorithm is used to estimate the subspace parameters. Then, the foreground matrix of the current frame can be obtained. Finally, the alternating direction method of multipliers (ADMM) is used to eliminate strong scattering objects in the foreground matrix to obtain the final results. The experimental results indicate that the SE-BSFV algorithm significantly enhances the shadows&#39; saliency and greatly improves the detection performance while ensuring efficiency compared with several other advanced pre-processing algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.09341v1-abstract-full').style.display = 'none'; document.getElementById('2501.09341v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. 
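</p> <p class="is-size-7 has-text-grey-dark"> The following minimal Python sketch illustrates, under simplifying assumptions, the general idea summarized above (an online low-rank subspace background model with a soft-thresholded sparse foreground). It is not the authors' SE-BSFV implementation: there is no Gaussian mixture modeling, EM step, or full ADMM here, and the class name, parameters, and update rule are purely illustrative. </p> <pre><code class="language-python">
# Minimal sketch: online low-rank background subspace + sparse foreground via
# soft-thresholding (a stand-in for the ADMM sparse step). Illustrative only;
# NOT the SE-BSFV algorithm of arXiv:2501.09341.
import numpy as np

def soft_threshold(x, tau):
    """Element-wise shrinkage used by proximal/ADMM-style sparse updates."""
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

class OnlineLowRankBackground:
    def __init__(self, frame_shape, rank=5, lr=0.05, sparsity=0.1):
        self.shape = frame_shape
        self.d = int(np.prod(frame_shape))   # pixels per registered frame
        self.lr = lr                         # subspace update step size
        self.sparsity = sparsity             # shrinkage strength for the foreground
        rng = np.random.default_rng(0)
        # Orthonormal basis of the low-rank background subspace.
        self.U, _ = np.linalg.qr(rng.standard_normal((self.d, rank)))

    def process(self, frame):
        x = frame.reshape(-1).astype(float)
        coeff = self.U.T @ x                 # project onto the current background subspace
        background = self.U @ coeff
        residual = x - background
        foreground = soft_threshold(residual, self.sparsity * np.abs(residual).max())
        # Crude online update of the subspace toward the background part of this frame.
        grad = np.outer(x - foreground - background, coeff)
        self.U, _ = np.linalg.qr(self.U + self.lr * grad)
        return foreground.reshape(self.shape), background.reshape(self.shape)

if __name__ == "__main__":
    model = OnlineLowRankBackground(frame_shape=(64, 64))
    for _ in range(10):                      # stand-in for a registered ViSAR sequence
        fg, bg = model.process(np.random.rand(64, 64))
</code></pre> <p class="is-size-7">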
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.08697">arXiv:2501.08697</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.08697">pdf</a>, <a href="https://arxiv.org/format/2501.08697">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Mesoscale and Nanoscale Physics">cond-mat.mes-hall</span> </div> </div> <p class="title is-5 mathjax"> ABACUS: An Electronic Structure Analysis Package for the AI Era </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+W">Weiqing Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zheng%2C+D">Daye Zheng</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Q">Qianrui Liu</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+D">Denghui Lu</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Yu Liu</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+P">Peize Lin</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+Y">Yike Huang</a>, <a href="/search/?searchtype=author&amp;query=Peng%2C+X">Xingliang Peng</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+J+J">Jie J. Bao</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+C">Chun Cai</a>, <a href="/search/?searchtype=author&amp;query=Jin%2C+Z">Zuxin Jin</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+J">Jing Wu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+H">Haochong Zhang</a>, <a href="/search/?searchtype=author&amp;query=Jin%2C+G">Gan Jin</a>, <a href="/search/?searchtype=author&amp;query=Ji%2C+Y">Yuyang Ji</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+Z">Zhenxiong Shen</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+X">Xiaohui Liu</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+L">Liang Sun</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+Y">Yu Cao</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+M">Menglin Sun</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+J">Jianchuan Liu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+T">Tao Chen</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+R">Renxi Liu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yuanbo Li</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+H">Haozhi Han</a> , et al. (28 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.08697v2-abstract-short" style="display: inline;"> ABACUS (Atomic-orbital Based Ab-initio Computation at USTC) is an open-source software for first-principles electronic structure calculations and molecular dynamics simulations. It mainly features density functional theory (DFT) and is compatible with both plane-wave basis sets and numerical atomic orbital basis sets. 
ABACUS serves as a platform that facilitates the integration of various electron&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.08697v2-abstract-full').style.display = 'inline'; document.getElementById('2501.08697v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.08697v2-abstract-full" style="display: none;"> ABACUS (Atomic-orbital Based Ab-initio Computation at USTC) is an open-source software for first-principles electronic structure calculations and molecular dynamics simulations. It mainly features density functional theory (DFT) and is compatible with both plane-wave basis sets and numerical atomic orbital basis sets. ABACUS serves as a platform that facilitates the integration of various electronic structure methods, such as Kohn-Sham DFT, stochastic DFT, orbital-free DFT, and real-time time-dependent DFT, etc. In addition, with the aid of high-performance computing, ABACUS is designed to perform efficiently and provide massive amounts of first-principles data for generating general-purpose machine learning potentials, such as DPA models. Furthermore, ABACUS serves as an electronic structure platform that interfaces with several AI-assisted algorithms and packages, such as DeePKS-kit, DeePMD, DP-GEN, DeepH, DeePTB, etc. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.08697v2-abstract-full').style.display = 'none'; document.getElementById('2501.08697v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.08080">arXiv:2501.08080</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.08080">pdf</a>, <a href="https://arxiv.org/format/2501.08080">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Search for the FCNC charmonium decay $J/ψ\to D^0 μ^+ μ^- + \text{c.c.}$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V.
Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a>, <a href="/search/?searchtype=author&amp;query=Brueggemann%2C+A">A. Brueggemann</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+H">H. Cai</a> , et al. (680 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.08080v2-abstract-short" style="display: inline;"> Based on a data sample of $(10087 \pm 44) \times 10^6$ $J/ψ$ events taken with the BESIII detector, we search for the flavor-changing neutral current charmonium decay $J/ψ\to D^{0} μ^{+} μ^{-} + \text{c.c.}$. No significant signal above the background is observed, and the upper limit on its branching fraction is set to be $\mathcal{B}(J/ψ\to D^{0}μ^{+}μ^{-} + \text{c.c.} ) &lt; 1.1 \times 10^{-7}$ at&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.08080v2-abstract-full').style.display = 'inline'; document.getElementById('2501.08080v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.08080v2-abstract-full" style="display: none;"> Based on a data sample of $(10087 \pm 44) \times 10^6$ $J/ψ$ events taken with the BESIII detector, we search for the flavor-changing neutral current charmonium decay $J/ψ\to D^{0} μ^{+} μ^{-} + \text{c.c.}$. No significant signal above the background is observed, and the upper limit on its branching fraction is set to be $\mathcal{B}(J/ψ\to D^{0}μ^{+}μ^{-} + \text{c.c.} ) &lt; 1.1 \times 10^{-7}$ at the 90% confidence level. This marks the first search for a flavor-changing neutral current charmonium decay involving muons in the final state. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.08080v2-abstract-full').style.display = 'none'; document.getElementById('2501.08080v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.06744">arXiv:2501.06744</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.06744">pdf</a>, <a href="https://arxiv.org/format/2501.06744">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Enabling Cardiac Monitoring using In-ear Ballistocardiogram on COTS Wireless Earbuds </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yongjian Fu</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+K">Ke Sun</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+R">Ruyao Wang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+X">Xinyi Li</a>, <a href="/search/?searchtype=author&amp;query=Ren%2C+J">Ju Ren</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Yaoxue Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+X">Xinyu Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.06744v1-abstract-short" style="display: inline;"> The human ear offers a unique opportunity for cardiac monitoring due to its physiological and practical advantages. However, existing earable solutions require additional hardware and complex processing, posing challenges for commercial True Wireless Stereo (TWS) earbuds which are limited by their form factor and resources. In this paper, we propose TWSCardio, a novel system that repurposes the IM&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.06744v1-abstract-full').style.display = 'inline'; document.getElementById('2501.06744v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.06744v1-abstract-full" style="display: none;"> The human ear offers a unique opportunity for cardiac monitoring due to its physiological and practical advantages. However, existing earable solutions require additional hardware and complex processing, posing challenges for commercial True Wireless Stereo (TWS) earbuds which are limited by their form factor and resources. In this paper, we propose TWSCardio, a novel system that repurposes the IMU sensors in TWS earbuds for cardiac monitoring. Our key finding is that these sensors can capture in-ear ballistocardiogram (BCG) signals. TWSCardio reuses the unstable Bluetooth channel to stream the IMU data to a smartphone for BCG processing. It incorporates a signal enhancement framework to address issues related to missing data and low sampling rate, while mitigating motion artifacts by fusing multi-axis information. Furthermore, it employs a region-focused signal reconstruction method to translate the multi-axis in-ear BCG signals into fine-grained seismocardiogram (SCG) signals. We have implemented TWSCardio as an efficient real-time app. 
Our experiments on 100 subjects verify that TWSCardio can accurately reconstruct cardiac signals while showing resilience to motion artifacts, missing data, and low sampling rates. Our case studies further demonstrate that TWSCardio can support diverse cardiac monitoring applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.06744v1-abstract-full').style.display = 'none'; document.getElementById('2501.06744v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.06426">arXiv:2501.06426</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.06426">pdf</a>, <a href="https://arxiv.org/format/2501.06426">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Search for $K^0_S$ invisible decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. 
(642 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.06426v1-abstract-short" style="display: inline;"> Based on $(1.0087\pm0.0044)\times10^{10}$ $J/ψ$ events collected with the BESIII detector at the BEPCII $e^+e^-$ storage ring, we search for $K_{S}^{0}$ invisible decays via the $J/ψ\to φK_{S}^{0} K_{S}^{0}$ process. No significant signal is observed, and the upper limit of the branching fraction of these invisible decays is set at 8.4 $\times$ $10^{-4}$ at the 90\% confidence level. This is the f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.06426v1-abstract-full').style.display = 'inline'; document.getElementById('2501.06426v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.06426v1-abstract-full" style="display: none;"> Based on $(1.0087\pm0.0044)\times10^{10}$ $J/ψ$ events collected with the BESIII detector at the BEPCII $e^+e^-$ storage ring, we search for $K_{S}^{0}$ invisible decays via the $J/ψ\to φK_{S}^{0} K_{S}^{0}$ process. No significant signal is observed, and the upper limit of the branching fraction of these invisible decays is set at 8.4 $\times$ $10^{-4}$ at the 90\% confidence level. This is the first experimental search for $K^0_S$ invisible decays. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.06426v1-abstract-full').style.display = 'none'; document.getElementById('2501.06426v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.05687">arXiv:2501.05687</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.05687">pdf</a>, <a href="https://arxiv.org/format/2501.05687">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> UniQ: Unified Decoder with Task-specific Queries for Efficient Scene Graph Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liao%2C+X">Xinyao Liao</a>, <a href="/search/?searchtype=author&amp;query=Wei%2C+W">Wei Wei</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+D">Dangyang Chen</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yuanyuan Fu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.05687v1-abstract-short" style="display: inline;"> Scene Graph Generation(SGG) is a scene understanding task that aims at identifying object entities and reasoning their relationships within a given image. In contrast to prevailing two-stage methods based on a large object detector (e.g., Faster R-CNN), one-stage methods integrate a fixed-size set of learnable queries to jointly reason relational triplets &lt;subject, predicate, object&gt;.
This paradig&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.05687v1-abstract-full').style.display = 'inline'; document.getElementById('2501.05687v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.05687v1-abstract-full" style="display: none;"> Scene Graph Generation(SGG) is a scene understanding task that aims at identifying object entities and reasoning their relationships within a given image. In contrast to prevailing two-stage methods based on a large object detector (e.g., Faster R-CNN), one-stage methods integrate a fixed-size set of learnable queries to jointly reason relational triplets &lt;subject, predicate, object&gt;. This paradigm demonstrates robust performance with significantly reduced parameters and computational overhead. However, the challenge in one-stage methods stems from the issue of weak entanglement, wherein entities involved in relationships require both coupled features shared within triplets and decoupled visual features. Previous methods either adopt a single decoder for coupled triplet feature modeling or multiple decoders for separate visual feature extraction but fail to consider both. In this paper, we introduce UniQ, a Unified decoder with task-specific Queries architecture, where task-specific queries generate decoupled visual features for subjects, objects, and predicates respectively, and unified decoder enables coupled feature modeling within relational triplets. Experimental results on the Visual Genome dataset demonstrate that UniQ has superior performance to both one-stage and two-stage methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.05687v1-abstract-full').style.display = 'none'; document.getElementById('2501.05687v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 5 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.10 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.05239">arXiv:2501.05239</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.05239">pdf</a>, <a href="https://arxiv.org/format/2501.05239">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Is Your Autonomous Vehicle Safe? 
Understanding the Threat of Electromagnetic Signal Injection Attacks on Traffic Scene Perception </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liao%2C+W">Wenhao Liao</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+S">Sineng Yan</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Youqian Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhai%2C+X">Xinwei Zhai</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yuanyuan Wang</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+E+Y">Eugene Yujun Fu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.05239v1-abstract-short" style="display: inline;"> Autonomous vehicles rely on camera-based perception systems to comprehend their driving environment and make crucial decisions, thereby ensuring vehicles to steer safely. However, a significant threat known as Electromagnetic Signal Injection Attacks (ESIA) can distort the images captured by these cameras, leading to incorrect AI decisions and potentially compromising the safety of autonomous vehi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.05239v1-abstract-full').style.display = 'inline'; document.getElementById('2501.05239v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.05239v1-abstract-full" style="display: none;"> Autonomous vehicles rely on camera-based perception systems to comprehend their driving environment and make crucial decisions, thereby ensuring vehicles to steer safely. However, a significant threat known as Electromagnetic Signal Injection Attacks (ESIA) can distort the images captured by these cameras, leading to incorrect AI decisions and potentially compromising the safety of autonomous vehicles. Despite the serious implications of ESIA, there is limited understanding of its impacts on the robustness of AI models across various and complex driving scenarios. To address this gap, our research analyzes the performance of different models under ESIA, revealing their vulnerabilities to the attacks. Moreover, due to the challenges in obtaining real-world attack data, we develop a novel ESIA simulation method and generate a simulated attack dataset for different driving scenarios. Our research provides a comprehensive simulation and evaluation framework, aiming to enhance the development of more robust AI models and secure intelligent systems, ultimately contributing to the advancement of safer and more reliable technology across various fields. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.05239v1-abstract-full').style.display = 'none'; document.getElementById('2501.05239v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in AAAI 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.05098">arXiv:2501.05098</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.05098">pdf</a>, <a href="https://arxiv.org/format/2501.05098">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Motion-X++: A Large-Scale Multimodal 3D Whole-body Human Motion Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Yuhong Zhang</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+J">Jing Lin</a>, <a href="/search/?searchtype=author&amp;query=Zeng%2C+A">Ailing Zeng</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+G">Guanlin Wu</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+S">Shunlin Lu</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yurong Fu</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+Y">Yuanhao Cai</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+R">Ruimao Zhang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+H">Haoqian Wang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Lei Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.05098v1-abstract-short" style="display: inline;"> In this paper, we introduce Motion-X++, a large-scale multimodal 3D expressive whole-body human motion dataset. Existing motion datasets predominantly capture body-only poses, lacking facial expressions, hand gestures, and fine-grained pose descriptions, and are typically limited to lab settings with manually labeled text descriptions, thereby restricting their scalability. To address this issue,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.05098v1-abstract-full').style.display = 'inline'; document.getElementById('2501.05098v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.05098v1-abstract-full" style="display: none;"> In this paper, we introduce Motion-X++, a large-scale multimodal 3D expressive whole-body human motion dataset. Existing motion datasets predominantly capture body-only poses, lacking facial expressions, hand gestures, and fine-grained pose descriptions, and are typically limited to lab settings with manually labeled text descriptions, thereby restricting their scalability. To address this issue, we develop a scalable annotation pipeline that can automatically capture 3D whole-body human motion and comprehensive textural labels from RGB videos and build the Motion-X dataset comprising 81.1K text-motion pairs. Furthermore, we extend Motion-X into Motion-X++ by improving the annotation pipeline, introducing more data modalities, and scaling up the data quantities. Motion-X++ provides 19.5M 3D whole-body pose annotations covering 120.5K motion sequences from massive scenes, 80.8K RGB videos, 45.3K audios, 19.5M frame-level whole-body pose descriptions, and 120.5K sequence-level semantic labels. 
Comprehensive experiments validate the accuracy of our annotation pipeline and highlight Motion-X++&#39;s significant benefits for generating expressive, precise, and natural motion with paired multimodal labels supporting several downstream tasks, including text-driven whole-body motion generation, audio-driven motion generation, 3D whole-body human mesh recovery, and 2D whole-body keypoints estimation, etc. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.05098v1-abstract-full').style.display = 'none'; document.getElementById('2501.05098v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 14 figures, This work extends and enhances the research published in the NeurIPS 2023 paper, &#34;Motion-X: A Large-scale 3D Expressive Whole-body Human Motion Dataset&#34;. arXiv admin note: substantial text overlap with arXiv:2307.00818</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.04760">arXiv:2501.04760</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.04760">pdf</a>, <a href="https://arxiv.org/format/2501.04760">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Search for the leptonic decay $D^{+}\to e^{+}ν_{e}$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F.
Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. (646 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.04760v1-abstract-short" style="display: inline;"> We search for the leptonic decay $D^+\to e^+ν_{e}$ using an $e^+e^-$ collision data sample with an integrated luminosity of 20.3~fb$^{-1}$ collected with the BESIII detector at the center-of-mass energy of 3.773~GeV. No significant signal is observed and an upper limit on the branching fraction of $D^+\to e^+ν_{e}$ is set as $9.7 \times 10^{-7}$, at the 90\% confidence level. Our upper limit is an&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04760v1-abstract-full').style.display = 'inline'; document.getElementById('2501.04760v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.04760v1-abstract-full" style="display: none;"> We search for the leptonic decay $D^+\to e^+ν_{e}$ using an $e^+e^-$ collision data sample with an integrated luminosity of 20.3~fb$^{-1}$ collected with the BESIII detector at the center-of-mass energy of 3.773~GeV. No significant signal is observed and an upper limit on the branching fraction of $D^+\to e^+ν_{e}$ is set as $9.7 \times 10^{-7}$, at the 90\% confidence level. Our upper limit is an order of magnitude smaller than the previous limit for this decay mode. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04760v1-abstract-full').style.display = 'none'; document.getElementById('2501.04760v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.04451">arXiv:2501.04451</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.04451">pdf</a>, <a href="https://arxiv.org/format/2501.04451">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Observation of the $W$-annihilation process $D_s^+ \to ωρ^+$ and measurement of $D_s^+ \to φρ^+$ in $D^+_s\to π^+π^+π^-π^0π^0$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R.
Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. (642 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.04451v1-abstract-short" style="display: inline;"> We present the first amplitude analysis and branching fraction measurement of the decay $D^+_s\to π^+π^+π^-π^0π^0$, using $e^+e^-$ collision data collected with the BESIII detector at center-of-mass energies between 4.128 and 4.226 GeV corresponding to an integrated luminosity of 7.33 fb$^{-1}$, and report the first observation of the pure $W$-annihilation decay $D_s^+ \to ωρ^+$ with a branching f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04451v1-abstract-full').style.display = 'inline'; document.getElementById('2501.04451v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.04451v1-abstract-full" style="display: none;"> We present the first amplitude analysis and branching fraction measurement of the decay $D^+_s\to π^+π^+π^-π^0π^0$, using $e^+e^-$ collision data collected with the BESIII detector at center-of-mass energies between 4.128 and 4.226 GeV corresponding to an integrated luminosity of 7.33 fb$^{-1}$, and report the first observation of the pure $W$-annihilation decay $D_s^+ \to ωρ^+$ with a branching fraction of $(0.99\pm0.08_{\rm stat}\pm0.07_{\rm syst})\%$. In comparison to the low significance of the $\mathcal{D}$ wave in the decay $D_s^+ \to φρ^+$, the dominance of the $\mathcal{D}$ wave over the $\mathcal{S}$ and $\mathcal{P}$ waves, with a fraction of $(51.85\pm7.28_{\rm stat}\pm7.90_{\rm syst})\%$ observed in the decay, provides crucial information for the ``polarization puzzle&#34;, as well as for the understanding of charm meson decays. The branching fraction of $D^+_s\to π^+π^+π^-π^0π^0$ is measured to be $(4.41\pm0.15_{\rm stat}\pm0.13_{\rm syst})\%$.
Moreover, the branching fraction of $D_s^+ \to φρ^+$ is measured to be $(3.98\pm0.33_{\rm stat}\pm0.21_{\rm syst})\%$, and the $R_φ= {\mathcal{B}(φ\toπ^+π^-π^0)}/{\mathcal{B}(φ\to K^+K^-)}$ is determined to be $(0.222\pm0.019_{\rm stat}\pm0.016_{\rm syst})$, which is consistent with the previous measurement based on charm meson decays, but deviates from the results from $e^+e^-$ annihilation and $K$-$N$ scattering experiments by more than 3$σ$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04451v1-abstract-full').style.display = 'none'; document.getElementById('2501.04451v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.04344">arXiv:2501.04344</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.04344">pdf</a>, <a href="https://arxiv.org/format/2501.04344">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Study of the electromagnetic Dalitz decay $J/ψ\to e^+e^- π^0$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al.
(639 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.04344v1-abstract-short" style="display: inline;"> We study the electromagnetic Dalitz decay $J/ψ\to e^+e^- π^0$ using $(10087 \pm 44) \times 10^6$ $J/ψ$ events collected by the BESIII detector. The di-electron-invariant-mass dependent transition form factor of this decay is explored for the first time. A significant resonant structure corresponding to the $ρ/ω$ resonance is observed, which cannot be described by existing theoretical models, due to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04344v1-abstract-full').style.display = 'inline'; document.getElementById('2501.04344v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.04344v1-abstract-full" style="display: none;"> We study the electromagnetic Dalitz decay $J/ψ\to e^+e^- π^0$ using $(10087 \pm 44) \times 10^6$ $J/ψ$ events collected by the BESIII detector. The di-electron-invariant-mass dependent transition form factor of this decay is explored for the first time. A significant resonant structure corresponding to the $ρ/ω$ resonance is observed, which cannot be described by existing theoretical models, due to contributions from the isospin-conserving $J/ψ\to ρπ^0$ and isospin-violating $J/ψ\to ωπ^0$ decays. The observed $ρ$--$ω$ interference is consistent with that of the pion form factor but features a relatively narrow $ρ$ peak. By taking into account the contribution of this resonant structure, the branching fraction of $J/ψ\to e^+e^- π^0$ in the full $e^+e^-$ invariant mass spectrum range is also measured for the first time to be $(8.06 \pm 0.31 (\rm{stat}) \pm 0.38 (\rm{syst}))\times 10^{-7}$, which is two times larger than the prediction of the Vector Meson Dominance model due to the observed resonant contribution of $ρ/ω$ resonances. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04344v1-abstract-full').style.display = 'none'; document.getElementById('2501.04344v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 4 figures, Submitted to Phys. Rev.
Lett</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> BAM-325 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.04140">arXiv:2501.04140</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.04140">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Spatiotemporal Gaussian Optimization for 4D Cone Beam CT Reconstruction from Sparse Projections </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yabo Fu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+H">Hao Zhang</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+W">Weixing Cai</a>, <a href="/search/?searchtype=author&amp;query=Xie%2C+H">Huiqiao Xie</a>, <a href="/search/?searchtype=author&amp;query=Kuo%2C+L">Licheng Kuo</a>, <a href="/search/?searchtype=author&amp;query=Cervino%2C+L">Laura Cervino</a>, <a href="/search/?searchtype=author&amp;query=Moran%2C+J">Jean Moran</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+X">Xiang Li</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+T">Tianfang Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.04140v1-abstract-short" style="display: inline;"> In image-guided radiotherapy (IGRT), four-dimensional cone-beam computed tomography (4D-CBCT) is critical for assessing tumor motion during a patient's breathing cycle prior to beam delivery. However, generating 4D-CBCT images with sufficient quality requires significantly more projection images than a standard 3D-CBCT scan, leading to extended scanning times and increased imaging dose to the patie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04140v1-abstract-full').style.display = 'inline'; document.getElementById('2501.04140v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.04140v1-abstract-full" style="display: none;"> In image-guided radiotherapy (IGRT), four-dimensional cone-beam computed tomography (4D-CBCT) is critical for assessing tumor motion during a patient's breathing cycle prior to beam delivery. However, generating 4D-CBCT images with sufficient quality requires significantly more projection images than a standard 3D-CBCT scan, leading to extended scanning times and increased imaging dose to the patient. To address these limitations, there is a strong demand for methods capable of reconstructing high-quality 4D-CBCT images from a 1-minute 3D-CBCT acquisition. The challenge lies in the sparse sampling of projections, which introduces severe streaking artifacts and compromises image quality. This paper introduces a novel framework leveraging spatiotemporal Gaussian representation for 4D-CBCT reconstruction from sparse projections, achieving a balance between streak artifact reduction, dynamic motion preservation, and fine detail restoration. Each Gaussian is characterized by its 3D position, covariance, rotation, and density.
Two-dimensional X-ray projection images can be rendered from the Gaussian point cloud representation via X-ray rasterization. The properties of each Gaussian were optimized by minimizing the discrepancy between the measured projections and the rendered X-ray projections. A Gaussian deformation network is jointly optimized to deform these Gaussian properties to obtain a 4D Gaussian representation for dynamic CBCT scene modeling. The final 4D-CBCT images are reconstructed by voxelizing the 4D Gaussians, achieving a high-quality representation that preserves both motion dynamics and spatial detail. The code and reconstruction results can be found at https://github.com/fuyabo/4DGS_for_4DCBCT/tree/main <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04140v1-abstract-full').style.display = 'none'; document.getElementById('2501.04140v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.04054">arXiv:2501.04054</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.04054">pdf</a>, <a href="https://arxiv.org/ps/2501.04054">ps</a>, <a href="https://arxiv.org/format/2501.04054">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Strongly Correlated Electrons">cond-mat.str-el</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> </div> </div> <p class="title is-5 mathjax"> Hermitian and Non-Hermitian Topological Transitions Characterized by Manifold Distance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Fang%2C+Z">ZhaoXiang Fang</a>, <a href="/search/?searchtype=author&amp;query=Gong%2C+M">Ming Gong</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+G">Guang-Can Guo</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yongxu Fu</a>, <a href="/search/?searchtype=author&amp;query=Xiong%2C+L">Long Xiong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.04054v1-abstract-short" style="display: inline;"> Topological phases are generally characterized by topological invariants denoted by integer numbers. However, different topological systems often require different topological invariants to measure, and these definitions usually fail at critical points. Therefore, it&#39;s challenging to predict what would occur during the transformation between two different topological phases. 
To address these issue&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04054v1-abstract-full').style.display = 'inline'; document.getElementById('2501.04054v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.04054v1-abstract-full" style="display: none;"> Topological phases are generally characterized by topological invariants denoted by integer numbers. However, different topological systems often require different topological invariants to measure, and these definitions usually fail at critical points. Therefore, it&#39;s challenging to predict what would occur during the transformation between two different topological phases. To address these issues, we propose a general definition based on fidelity and trace distance from quantum information theory: manifold distance (MD). This definition does not rely on the Berry connection but rather on the information of the two manifolds - their ground state wave functions. Thus, it can measure different topological systems (including traditional band topology models, non-Hermitian systems, and gapless systems, etc.) and exhibit some universal laws during the transformation between two topological phases. Our research demonstrates that for different topological manifolds, the change rate (first-order derivative) or susceptibility (second-order derivative) of MD exhibits various divergent behaviors near the critical points. Compared to the strange correlator, which could be used as a diagnosis for short-range entangled states in 1D and 2D, MD is more universal and could be applied to non-Hermitian systems and long-range entangled states. For subsequent studies, we expect the method to be generalized to real-space or non-lattice models, in order to facilitate the study of a wider range of physical platforms such as open systems and many-body localization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.04054v1-abstract-full').style.display = 'none'; document.getElementById('2501.04054v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2405.03323</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.03571">arXiv:2501.03571</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.03571">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> </div> </div> <p class="title is-5 mathjax"> AADNet: Exploring EEG Spatiotemporal Information for Fast and Accurate Orientation and Timbre Detection of Auditory Attention Based on A Cue-Masked Paradigm </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Shi%2C+K">Keren Shi</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+X">Xu Liu</a>, <a href="/search/?searchtype=author&amp;query=Yuan%2C+X">Xue Yuan</a>, <a href="/search/?searchtype=author&amp;query=Shang%2C+H">Haijie Shang</a>, <a href="/search/?searchtype=author&amp;query=Dai%2C+R">Ruiting Dai</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+H">Hanbin Wang</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yunfa Fu</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+N">Ning Jiang</a>, <a href="/search/?searchtype=author&amp;query=He%2C+J">Jiayuan He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.03571v1-abstract-short" style="display: inline;"> Auditory attention decoding from electroencephalogram (EEG) could infer to which source the user is attending in noisy environments. Decoding algorithms and experimental paradigm designs are crucial for the development of technology in practical applications. To simulate real-world scenarios, this study proposed a cue-masked auditory attention paradigm to avoid information leakage before the exper&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.03571v1-abstract-full').style.display = 'inline'; document.getElementById('2501.03571v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.03571v1-abstract-full" style="display: none;"> Auditory attention decoding from electroencephalogram (EEG) could infer to which source the user is attending in noisy environments. Decoding algorithms and experimental paradigm designs are crucial for the development of technology in practical applications. To simulate real-world scenarios, this study proposed a cue-masked auditory attention paradigm to avoid information leakage before the experiment. To obtain high decoding accuracy with low latency, an end-to-end deep learning model, AADNet, was proposed to exploit the spatiotemporal information from the short time window of EEG signals. 
The results showed that with a 0.5-second EEG window, AADNet achieved an average accuracy of 93.46% and 91.09% in decoding auditory orientation attention (OA) and timbre attention (TA), respectively. It significantly outperformed five previous methods and did not need knowledge of the original audio source. This work demonstrated that it was possible to detect the orientation and timbre of auditory attention from EEG signals fast and accurately. The results are promising for real-time multi-property auditory attention decoding, facilitating the application of neuro-steered hearing aids and other assistive listening devices. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.03571v1-abstract-full').style.display = 'none'; document.getElementById('2501.03571v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.03289">arXiv:2501.03289</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.03289">pdf</a>, <a href="https://arxiv.org/format/2501.03289">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Adaptive Pruning of Pretrained Transformer via Differential Inclusions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ding%2C+Y">Yizhuo Ding</a>, <a href="/search/?searchtype=author&amp;query=Fan%2C+K">Ke Fan</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yikai Wang</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+X">Xinwei Sun</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yanwei Fu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.03289v1-abstract-short" style="display: inline;"> Large transformers have demonstrated remarkable success, making it necessary to compress these models to reduce inference costs while preserving their performance. Current compression algorithms prune transformers at fixed compression ratios, requiring a unique pruning process for each ratio, which results in high computational costs. In contrast, we propose pruning of pretrained transformers at&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.03289v1-abstract-full').style.display = 'inline'; document.getElementById('2501.03289v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.03289v1-abstract-full" style="display: none;"> Large transformers have demonstrated remarkable success, making it necessary to compress these models to reduce inference costs while preserving their performance. Current compression algorithms prune transformers at fixed compression ratios, requiring a unique pruning process for each ratio, which results in high computational costs. 
In contrast, we propose pruning of pretrained transformers at any desired ratio within a single pruning stage, based on a differential inclusion for a mask parameter. This dynamic can generate the whole regularization solution path of the mask parameter, whose support set identifies the network structure. Therefore, the solution path identifies a Transformer weight family with various sparsity levels, offering greater flexibility and customization. In this paper, we introduce such an effective pruning method, termed SPP (Solution Path Pruning). To achieve effective pruning, we segment the transformers into paired modules, including query-key pairs, value-projection pairs, and sequential linear layers, and apply low-rank compression to these pairs, maintaining the output structure while enabling structural compression within the inner states. Extensive experiments conducted on various well-known transformer backbones have demonstrated the efficacy of SPP. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.03289v1-abstract-full').style.display = 'none'; document.getElementById('2501.03289v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.02795">arXiv:2501.02795</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.02795">pdf</a>, <a href="https://arxiv.org/format/2501.02795">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> InfiFusion: A Unified Framework for Enhanced Cross-Model Reasoning via LLM Fusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Yan%2C+Z">Zhaoyi Yan</a>, <a href="/search/?searchtype=author&amp;query=Sang%2C+Z">Zhijie Sang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Yiming Zhang</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yuhao Fu</a>, <a href="/search/?searchtype=author&amp;query=He%2C+B">Baoyi He</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Q">Qi Zhou</a>, <a href="/search/?searchtype=author&amp;query=Di%2C+Y">Yining Di</a>, <a href="/search/?searchtype=author&amp;query=Ji%2C+C">Chunlin Ji</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+S">Shengyu Zhang</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+F">Fei Wu</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+H">Hongxia Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.02795v2-abstract-short" style="display: inline;"> Large Language Models (LLMs) have demonstrated strong performance across various reasoning tasks, yet building a single model that consistently excels across all domains remains challenging. 
This paper addresses this problem by exploring strategies to integrate multiple domain-specialized models into an efficient pivot model. We propose two fusion strategies to combine the strengths of multiple LLM&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.02795v2-abstract-full').style.display = 'inline'; document.getElementById('2501.02795v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.02795v2-abstract-full" style="display: none;"> Large Language Models (LLMs) have demonstrated strong performance across various reasoning tasks, yet building a single model that consistently excels across all domains remains challenging. This paper addresses this problem by exploring strategies to integrate multiple domain-specialized models into an efficient pivot model. We propose two fusion strategies to combine the strengths of multiple LLMs: (1) a pairwise, multi-step fusion approach that sequentially distills each source model into the pivot model, followed by a weight merging step to integrate the distilled models into the final model. This method achieves strong performance but requires substantial training effort; and (2) a unified fusion approach that aggregates all source models&#39; outputs simultaneously. To improve the fusion process, we introduce a novel Rate-Skewness Adaptive Fusion (RSAF) technique, which dynamically adjusts top-K ratios during parameter merging for enhanced flexibility and stability. Furthermore, we propose an uncertainty-based weighting method for the unified approach, which dynamically balances the contributions of source models and outperforms other logits/distribution ensemble methods. We achieved accuracy improvements of 9.27%, 8.80%, and 8.89% on the GSM8K, MATH, and HumanEval tasks, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.02795v2-abstract-full').style.display = 'none'; document.getElementById('2501.02795v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.02594">arXiv:2501.02594</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.02594">pdf</a>, <a href="https://arxiv.org/format/2501.02594">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Observation of $ψ(3686) \to K^{-}Λ(1520)\barΞ^{+} + c.c.$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. 
Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. (642 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.02594v1-abstract-short" style="display: inline;"> Based on $(2712.4 \pm 14.3)\times 10^6$ $ψ(3686)$ events collected at the BESIII detector operating at the BEPCII collider, we present the first observation of the decay $ψ(3686) \to K^{-}Λ(1520)\barΞ^{+} + c.c.$. The product branching fraction ${\cal B}[ψ(3686) \to K^{-}Λ(1520)\barΞ^{+} + c.c.] \times {\cal B}[Λ(1520) \to pK^{-}]$ is measured to be $(9.5 \pm 0.8 \pm 1.1) \times 10^{-7}$, where th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.02594v1-abstract-full').style.display = 'inline'; document.getElementById('2501.02594v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.02594v1-abstract-full" style="display: none;"> Based on $(2712.4 \pm 14.3)\times 10^6$ $ψ(3686)$ events collected at the BESIII detector operating at the BEPCII collider, we present the first observation of the decay $ψ(3686) \to K^{-}Λ(1520)\barΞ^{+} + c.c.$. The product branching fraction ${\cal B}[ψ(3686) \to K^{-}Λ(1520)\barΞ^{+} + c.c.] \times {\cal B}[Λ(1520) \to pK^{-}]$ is measured to be $(9.5 \pm 0.8 \pm 1.1) \times 10^{-7}$, where the first uncertainty is statistical and the second systematic. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.02594v1-abstract-full').style.display = 'none'; document.getElementById('2501.02594v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.01794">arXiv:2501.01794</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.01794">pdf</a>, <a href="https://arxiv.org/ps/2501.01794">ps</a>, <a href="https://arxiv.org/format/2501.01794">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> </div> </div> <p class="title is-5 mathjax"> Studying the divisibility of power LCM matrics by power GCD matrices on gcd-closed sets </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhao%2C+J">Jianrong Zhao</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+C">Chenxu Wang</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yu Fu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.01794v1-abstract-short" style="display: inline;"> Let $S=\{x_1,\ldots, x_n\}$ be a gcd-closed set (i.e. $(x_i,x_j)\in S $ for all $1\le i,j\le n$). In 2002, Hong proposed the divisibility problem of characterizing all gcd-closed sets $S$ with $|S|\ge 4$ such that the GCD matrix $(S)$ divides the LCM matrix $[S]$ in the ring $M_{n}(\mathbb{Z})$. For $x\in S,$ let $G_S(x):=\{z\in S: z&lt;x, z|x \text{ and } (z|y|x, y\in S)\Rightarrow y\in\{z,x\}\}$. I&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.01794v1-abstract-full').style.display = 'inline'; document.getElementById('2501.01794v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.01794v1-abstract-full" style="display: none;"> Let $S=\{x_1,\ldots, x_n\}$ be a gcd-closed set (i.e. $(x_i,x_j)\in S $ for all $1\le i,j\le n$). In 2002, Hong proposed the divisibility problem of characterizing all gcd-closed sets $S$ with $|S|\ge 4$ such that the GCD matrix $(S)$ divides the LCM matrix $[S]$ in the ring $M_{n}(\mathbb{Z})$. For $x\in S,$ let $G_S(x):=\{z\in S: z&lt;x, z|x \text{ and } (z|y|x, y\in S)\Rightarrow y\in\{z,x\}\}$. In 2009, Feng, Hong and Zhao answered this problem in the context where $\max_{x \in S}\{|G_S(x)|\} \leq 2$. In 2022, Zhao, Chen and Hong obtained a necessary and sufficient condition on the gcd-closed set $S$ with $\max_{x \in S}\{|G_S(x)|\}=3$ such that $(S)|\left[S\right].$ Meanwhile, they raised a conjecture on the necessary and sufficient condition such that $(S)|\left[S\right]$ holds for the remaining case $\max_{x \in S}\{|G_S(x)|\}\ge 4$. In this papar, we confirm the Zhao-Chen-Hong conjecture from a novel perspective, consequently solve Hong&#39;s open problem completely. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.01794v1-abstract-full').style.display = 'none'; document.getElementById('2501.01794v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.01661">arXiv:2501.01661</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.01661">pdf</a>, <a href="https://arxiv.org/ps/2501.01661">ps</a>, <a href="https://arxiv.org/format/2501.01661">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Search for $畏_c(2S)\to p\bar{p}K^+K^-$ and measurement of $蠂_{cJ}\to p\bar{p}K^+K^-$ in $蠄(3686)$ radiative decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a>, <a href="/search/?searchtype=author&amp;query=Brueggemann%2C+A">A. Brueggemann</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+H">H. Cai</a> , et al. 
(639 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.01661v1-abstract-short" style="display: inline;"> A search for $η_c(2S)\to p\bar{p}K^+K^-$, together with measurement of branching fractions of $χ_{cJ(J=0,1,2)}\to p\bar{p}K^+K^-$ in the $ψ(3686) \to γη_c(2S)$ and the $ψ(3686) \to γχ_{cJ}$ radiative decays, is performed with $(2712.4\pm14.3)\times 10^6$ $ψ(3686)$ events collected with the BESIII detector at the BEPCII collider. Evidence for $η_c(2S)\to p\bar{p}K^+K^-$ is found, with a signific&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.01661v1-abstract-full').style.display = 'inline'; document.getElementById('2501.01661v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.01661v1-abstract-full" style="display: none;"> A search for $η_c(2S)\to p\bar{p}K^+K^-$, together with measurement of branching fractions of $χ_{cJ(J=0,1,2)}\to p\bar{p}K^+K^-$ in the $ψ(3686) \to γη_c(2S)$ and the $ψ(3686) \to γχ_{cJ}$ radiative decays, is performed with $(2712.4\pm14.3)\times 10^6$ $ψ(3686)$ events collected with the BESIII detector at the BEPCII collider. Evidence for $η_c(2S)\to p\bar{p}K^+K^-$ is found, with a significance of $3.3σ$. The product branching fraction of $\mathcal{B}[ψ(3686)\toγη_c(2S)]\cdot\mathcal{B}[η_c(2S)\to p\bar{p}K^+K^-]$ is determined to be $(1.98\mkern 2mu\pm\mkern 2mu0.41_{\text{stat.}}\mkern 2mu\pm\mkern 2mu0.99_{\text{syst.}})\times 10^{-7}$. The product branching fractions of $\mathcal{B}[ψ(3686)\toγχ_{cJ}]\cdot\mathcal{B}[χ_{cJ}\to p\bar{p}K^+K^-]$ are measured to be $(2.49\mkern 2mu\pm\mkern 2mu 0.03_{\text{stat.}}\mkern 2mu\pm\mkern 2mu 0.15_{\text{syst.}})\times 10^{-5}$, $(1.83\mkern 2mu \pm\mkern 2mu 0.02_{\text{stat.}}\mkern 2mu \pm\mkern 2mu 0.11_{\text{syst.}})\times 10^{-5}$, and $(2.43\mkern 2mu\pm\mkern 2mu 0.02_{\text{stat.}}\mkern 2mu\pm\mkern 2mu 0.15_{\text{syst.}})\times 10^{-5}$, for $J=0,\ 1$, and 2, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.01661v1-abstract-full').style.display = 'none'; document.getElementById('2501.01661v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.01622">arXiv:2501.01622</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.01622">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Mesoscale and Nanoscale Physics">cond-mat.mes-hall</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Strongly Correlated Electrons">cond-mat.str-el</span> </div> </div> <p class="title is-5 mathjax"> Visualization of intervalley coherent phase in PtSe2/HOPG heterojunction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Fan%2C+K">Kai Fan</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+B">Bohao Li</a>, <a href="/search/?searchtype=author&amp;query=Qiu%2C+W">Wen-Xuan Qiu</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+T">Ting-Fei Guo</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+J">Jian-Wang Zhou</a>, <a href="/search/?searchtype=author&amp;query=Xie%2C+T">Tao Xie</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+W">Wen-Hao Zhang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+C">Chao-Fei Liu</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+F">Fengcheng Wu</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Ying-Shuang Fu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.01622v1-abstract-short" style="display: inline;"> Intervalley coherent (IVC) phase in graphene systems arises from the coherent superposition of wave functions of opposite valleys, whose direct microscopic visualization provides pivotal insight into the emergent physics but remains elusive. Here, we successfully visualize the IVC phase in a heterostructure of monolayer PtSe2 on highly oriented pyrolytic graphite. Using spectroscopic imaging scann&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.01622v1-abstract-full').style.display = 'inline'; document.getElementById('2501.01622v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.01622v1-abstract-full" style="display: none;"> Intervalley coherent (IVC) phase in graphene systems arises from the coherent superposition of wave functions of opposite valleys, whose direct microscopic visualization provides pivotal insight into the emergent physics but remains elusive. Here, we successfully visualize the IVC phase in a heterostructure of monolayer PtSe2 on highly oriented pyrolytic graphite. Using spectroscopic imaging scanning tunneling microscopy, we observe a Root3 by Root3 modulation pattern superimposed on the higher-order moire superlattice of the heterostructure, which correlates with a small gap opening around the Fermi level and displays an anti-phase real-space conductance distribution of the two gap edges. Such modulation pattern and small-gap vanish on the heterostructure of monolayer PtSe2 on bilayer-graphene-covered SiC substrate, due to the increased carrier density in the bilayer graphene. 
We provide a theoretical mechanism that the Root3 by Root3 modulation pattern originates from the IVC phase of few-layer graphene, which is magnified by the higher-order moire superlattice. Our work achieves visualization of the IVC phase, and develops an avenue for its generation and amplification via a moiré interface. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.01622v1-abstract-full').style.display = 'none'; document.getElementById('2501.01622v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.01616">arXiv:2501.01616</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.01616">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Digital-Analog Transmission based Emergency Semantic Communications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yuzhou Fu</a>, <a href="/search/?searchtype=author&amp;query=Cheng%2C+W">Wenchi Cheng</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+J">Jingqing Wang</a>, <a href="/search/?searchtype=author&amp;query=Yin%2C+L">Liuguo Yin</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+W">Wei Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.01616v1-abstract-short" style="display: inline;"> Emergency Wireless Communication (EWC) networks adopt the User Datagram Protocol (UDP) to transmit scene images in real time for quickly assessing the extent of the damage. However, existing UDP-based EWC exhibits suboptimal performance under poor channel conditions since UDP lacks an Automatic Repeat reQuest (ARQ) mechanism. In addition, future EWC systems must not only enhance human decisionmaki&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.01616v1-abstract-full').style.display = 'inline'; document.getElementById('2501.01616v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.01616v1-abstract-full" style="display: none;"> Emergency Wireless Communication (EWC) networks adopt the User Datagram Protocol (UDP) to transmit scene images in real time for quickly assessing the extent of the damage. However, existing UDP-based EWC exhibits suboptimal performance under poor channel conditions since UDP lacks an Automatic Repeat reQuest (ARQ) mechanism. In addition, future EWC systems must not only enhance human decisionmaking during emergency response operations but also support Artificial Intelligence (AI)-driven approaches to improve rescue efficiency. 
The Deep Learning-based Semantic Communication (DL-based SemCom) emerges as a robust, efficient, and task-oriented transmission scheme, suitable for deployment in UDP-based EWC. Due to the constraints in hardware capabilities and transmission resources, the EWC transmitter is unable to integrate a sufficiently powerful NN model, thereby failing to achieve ideal performance in the EWC scenario. For this scenario, we propose a performance-constrained semantic coding model, which considers the effects of the semantic noise and the channel noise. Then, we derive the Cramer-Rao lower bound of the proposed semantic coding model, as guidance for the design of the semantic codec to enhance its adaptability to semantic noise as well as channel noise. To further improve the system performance, we propose the Digital-Analog transmission based Emergency Semantic Communication (DA-ESemCom) framework, which integrates the analog DL-based semantic coding and the digital Distributed Source Coding (DSC) schemes to leverage their respective advantages. The simulation results show that the proposed DA-ESemCom framework outperforms the classical Separated Source-Channel Coding (SSCC) and other DL-based Joint Source-Channel Coding (DL-based JSCC) schemes in terms of fidelity and detection performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.01616v1-abstract-full').style.display = 'none'; document.getElementById('2501.01616v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.20993">arXiv:2412.20993</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2412.20993">pdf</a>, <a href="https://arxiv.org/format/2412.20993">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Efficiently Serving LLM Reasoning Programs with Certaindex </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yichao Fu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+J">Junda Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+S">Siqi Zhu</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Z">Zheyu Fu</a>, <a href="/search/?searchtype=author&amp;query=Dai%2C+Z">Zhongdongming Dai</a>, <a href="/search/?searchtype=author&amp;query=Qiao%2C+A">Aurick Qiao</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+H">Hao Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.20993v1-abstract-short" style="display: inline;"> The rapid evolution of large language models (LLMs) has unlocked their capabilities in advanced reasoning tasks like mathematical problem-solving, code generation, and legal analysis. 
Central to this progress are inference-time reasoning algorithms, which refine outputs by exploring multiple solution paths, at the cost of increasing compute demands and response latencies. Existing serving systems&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.20993v1-abstract-full').style.display = 'inline'; document.getElementById('2412.20993v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2412.20993v1-abstract-full" style="display: none;"> The rapid evolution of large language models (LLMs) has unlocked their capabilities in advanced reasoning tasks like mathematical problem-solving, code generation, and legal analysis. Central to this progress are inference-time reasoning algorithms, which refine outputs by exploring multiple solution paths, at the cost of increasing compute demands and response latencies. Existing serving systems fail to adapt to the scaling behaviors of these algorithms or the varying difficulty of queries, leading to inefficient resource use and unmet latency targets. We present Dynasor, a system that optimizes inference-time compute for LLM reasoning queries. Unlike traditional engines, Dynasor tracks and schedules requests within reasoning queries and uses Certaindex, a proxy that measures statistical reasoning progress based on model certainty, to guide compute allocation dynamically. Dynasor co-adapts scheduling with reasoning progress: it allocates more compute to hard queries, reduces compute for simpler ones, and terminates unpromising queries early, balancing accuracy, latency, and cost. On diverse datasets and algorithms, Dynasor reduces compute by up to 50% in batch processing and sustains 3.3x higher query rates or 4.7x tighter latency SLOs in online serving. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.20993v1-abstract-full').style.display = 'none'; document.getElementById('2412.20993v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.20305">arXiv:2412.20305</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2412.20305">pdf</a>, <a href="https://arxiv.org/ps/2412.20305">ps</a>, <a href="https://arxiv.org/format/2412.20305">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of Born cross section of $e^+e^-\toΣ^0\barΣ^0$ at $\sqrt{s} = 3.50-4.95$ GeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. 
Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a>, <a href="/search/?searchtype=author&amp;query=Brueggemann%2C+A">A. Brueggemann</a> , et al. (649 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.20305v1-abstract-short" style="display: inline;"> Using $e^+e^-$ collision data collected with the BESIII detector at the BEPCII collider at thirty-two center-of-mass energies from 3.50 to 4.95 GeV, corresponding to an integrated luminosity of 25 $\rm{fb^{-1}}$, we measure the Born cross section of the $e^+e^-\toΣ^0\barΣ^0$ reaction and the effective form factor. No significant charmonium(-like) state, i.e., $ψ(3770)$, $ψ(4040)$, $ψ(4160)$,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.20305v1-abstract-full').style.display = 'inline'; document.getElementById('2412.20305v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2412.20305v1-abstract-full" style="display: none;"> Using $e^+e^-$ collision data collected with the BESIII detector at the BEPCII collider at thirty-two center-of-mass energies from 3.50 to 4.95 GeV, corresponding to an integrated luminosity of 25 $\rm{fb^{-1}}$, we measure the Born cross section of the $e^+e^-\toΣ^0\barΣ^0$ reaction and the effective form factor. No significant charmonium(-like) state, i.e., $ψ(3770)$, $ψ(4040)$, $ψ(4160)$, $ψ(4230)$, $ψ(4360)$, $ψ(4415)$, or $ψ(4660)$, decaying into the $Σ^0\barΣ^0$ final state is observed by fitting the $e^+e^- \to Σ^0\barΣ^0$ dressed cross section. The upper limits for the product of the branching fraction and the electronic partial width at the 90% confidence level are provided for each assumed charmonium(-like) state. In addition, the ratios of the Born cross section and the effective form factor between the $e^+e^-\toΣ^0\barΣ^0$ and the $e^+e^-\toΣ^+\barΣ^-$ reactions are provided, which can be used to validate the prediction of the vector meson dominance model. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.20305v1-abstract-full').style.display = 'none'; document.getElementById('2412.20305v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 3 figures, 1 Supplemental Material</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.20009">arXiv:2412.20009</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2412.20009">pdf</a>, <a href="https://arxiv.org/format/2412.20009">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1088/1674-1056/ad9017">10.1088/1674-1056/ad9017 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> SolarDesign: An Online Photovoltaic Device Simulation and Design Platform </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Sha%2C+W+E+I">Wei E. I. Sha</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+X">Xiaoyu Wang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+W">Wenchao Chen</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yuhao Fu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Lijun Zhang</a>, <a href="/search/?searchtype=author&amp;query=Tian%2C+L">Liang Tian</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+M">Minshen Lin</a>, <a href="/search/?searchtype=author&amp;query=Jiao%2C+S">Shudi Jiao</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+T">Ting Xu</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+T">Tiange Sun</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+D">Dongxue Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.20009v1-abstract-short" style="display: inline;"> SolarDesign (https://solardesign.cn/) is an online photovoltaic device simulation and design platform that provides engineering modeling analysis for crystalline silicon solar cells, as well as emerging high-efficiency solar cells such as organic, perovskite, and tandem cells. 
The platform offers user-updatable libraries of basic photovoltaic materials and devices, device-level multi-physics simulations involving optical-electrical-thermal interactions, and circuit-level compact model simulations based on detailed balance theory. Employing internationally advanced numerical methods, the platform accurately, rapidly, and efficiently solves optical absorption, electrical transport, and compact circuit models. It achieves multi-level photovoltaic simulation technology from ``materials to devices to circuits&#39;&#39; with fully independent intellectual property rights. Compared to commercial software, the platform achieves high accuracy and improves speed by more than an order of magnitude. Additionally, it can simulate unique electrical transport processes in emerging solar cells, such as quantum tunneling, exciton dissociation, and ion migration. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.20009v1-abstract-full').style.display = 'none'; document.getElementById('2412.20009v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 7 figures, 3 tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Chinese Physics B 34(1): 018801, 2024 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.19702">arXiv:2412.19702</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2412.19702">pdf</a>, <a href="https://arxiv.org/ps/2412.19702">ps</a>, <a href="https://arxiv.org/format/2412.19702">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Search for the double Dalitz decays $η/η&#39; \to e^+e^-μ^+μ^-$ and $η&#39; \to μ^+μ^-μ^+μ^-$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. 
Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a>, <a href="/search/?searchtype=author&amp;query=Brueggemann%2C+A">A. Brueggemann</a> , et al. (648 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.19702v1-abstract-short" style="display: inline;"> Using a data sample of $(10087 \pm 44) \times {10^{6}}$ $J/ψ$ events collected with the BESIII detector, we search for the decays $η/η&#39;\to e^+e^-μ^+μ^-$ and $η&#39; \to μ^+μ^-μ^+μ^-$ via the radiative decays $J/ψ\toγη$/$γη&#39;$. No excess of events over expected background is observed for any of the decays of interest. At 90% confidence level, we report the first upper limits on the branching fractions o&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.19702v1-abstract-full').style.display = 'inline'; document.getElementById('2412.19702v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2412.19702v1-abstract-full" style="display: none;"> Using a data sample of $(10087 \pm 44) \times {10^{6}}$ $J/ψ$ events collected with the BESIII detector, we search for the decays $η/η&#39;\to e^+e^-μ^+μ^-$ and $η&#39; \to μ^+μ^-μ^+μ^-$ via the radiative decays $J/ψ\toγη$/$γη&#39;$. No excess of events over expected background is observed for any of the decays of interest. At 90% confidence level, we report the first upper limits on the branching fractions of $η&#39; \to e^{+}e^{-}μ^{+}μ^{-}$ and $η&#39; \to μ^{+}μ^{-}μ^{+}μ^{-}$ to be $ 1.75 \times {10^{-6}}$ and $5.28 \times {10^{-7}}$, respectively. In addition, we set an upper limit on the branching fraction of $η\to e^{+}e^{-}μ^{+}μ^{-}$ to be $6.88 \times {10^{-6}}$, which improves the previous result by about two orders of magnitude. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.19702v1-abstract-full').style.display = 'none'; document.getElementById('2412.19702v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Fu%2C+Y&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a 
href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10