<!-- Capture artifact removed: the crawl prefixed this document with the plain-text lines "CINXE.COM" and "Search | arXiv e-print repository" before the doctype. Stray text before the doctype is invalid HTML and can trigger quirks mode, so it is preserved here as a comment instead. -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 4,752 results for author: <span class="mathjax">Liu, W</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/" aria-role="search"> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Liu, W"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label 
class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Liu%2C+W&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Liu, W"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option 
value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <!-- aria-current="page" belongs only on the current page (page 1, which carries is-current); it was previously set on pages 2-5 instead --> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=0" class="pagination-link is-current" aria-label="Page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=100" class="pagination-link " aria-label="Page 3">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=150" class="pagination-link " aria-label="Page 4">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=200" class="pagination-link " aria-label="Page 5">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li
class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.17480">arXiv:2411.17480</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.17480">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> </div> <p class="title is-5 mathjax"> Ultra-low-loss slow-light thin-film lithium-niobate optical modulator </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+C">Chenlei Li</a>, <a href="/search/?searchtype=author&amp;query=He%2C+J">Jianghao He</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+M">Ming Zhang</a>, <a href="/search/?searchtype=author&amp;query=Tong%2C+Y">Yeyu Tong</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Weixi Liu</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+S">Siyuan Wang</a>, <a href="/search/?searchtype=author&amp;query=Song%2C+L">Lijia Song</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+H">Hongxuan Liu</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+H">Hengzhen Cao</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+L">Liu Liu</a>, <a href="/search/?searchtype=author&amp;query=Shi%2C+Y">Yaocheng Shi</a>, <a href="/search/?searchtype=author&amp;query=Dai%2C+D">Daoxin Dai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.17480v1-abstract-short" style="display: inline;"> Electro-optic modulators for next-generation optical interconnects require low loss-efficiency products, compact footprints, high modulation efficiency, broad bandwidths, and low losses. 
Here we propose and demonstrate a low-loss high-efficiency thin-film lithium-niobate Mach Zehnder modulator enabled by a novel ultralow-loss slow-light structure based on apodized gratings in cascade. The present&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17480v1-abstract-full').style.display = 'inline'; document.getElementById('2411.17480v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.17480v1-abstract-full" style="display: none;"> Electro-optic modulators for next-generation optical interconnects require low loss-efficiency products, compact footprints, high modulation efficiency, broad bandwidths, and low losses. Here we propose and demonstrate a low-loss high-efficiency thin-film lithium-niobate Mach Zehnder modulator enabled by a novel ultralow-loss slow-light structure based on apodized gratings in cascade. The present loss-engineered slow-light structure achieves excess losses as low as 0.6 dB/mm experimentally, which is tens of times lower than conventional slow-light structures, and a high modulation bandwidth up to 320GHz in theory is achieved with optimally-designed capacitively-loaded traveling-wave electrodes. Experimentally, the fabricated slow-light modulator with a 2.8-mm-long modulation region has an ultra-low loss-efficiency product of 7.4 VdB and a flat electro-optic response up to 67 GHz, enabling 100-Gbps on-off keying with high ERs of 4.5 dB at a low driving voltage of 2Vpp, while 200-Gbps PAM4 and 150-Gbps PAM8 signals are also generated to show great promise for advanced modulation formats. In particular, it has also achieved the highest figure-of-merit(FOM) of 182 for high-speed optical modulation , including the bit rate, the extinction ratio normalized with respective to Vpp, the modulation efficiency. 
The outstanding performance of the present apodized-grating-based slow-light modulator shows great potential and paves the way for developing high-speed optical interconnects for both data-centers and high-performance computing systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17480v1-abstract-full').style.display = 'none'; document.getElementById('2411.17480v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16978">arXiv:2411.16978</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16978">pdf</a>, <a href="https://arxiv.org/ps/2411.16978">ps</a>, <a href="https://arxiv.org/format/2411.16978">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Econometrics">econ.EM</span> </div> </div> <p class="title is-5 mathjax"> Normal Approximation for U-Statistics with Cross-Sectional Dependence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Weiguang Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16978v1-abstract-short" style="display: inline;"> We apply Stein&#39;s method to investigate the normal approximation for both non-degenerate and degenerate U-statistics with cross-sectionally dependent underlying processes in the Wasserstein metric. 
We show that the convergence rates depend on the mixing rates, the sparsity of the cross-sectional dependence, and the moments of the kernel functions. Conditions are derived for central limit theorems t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16978v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16978v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16978v1-abstract-full" style="display: none;"> We apply Stein&#39;s method to investigate the normal approximation for both non-degenerate and degenerate U-statistics with cross-sectionally dependent underlying processes in the Wasserstein metric. We show that the convergence rates depend on the mixing rates, the sparsity of the cross-sectional dependence, and the moments of the kernel functions. Conditions are derived for central limit theorems to hold as corollaries. We demonstrate one application of the theoretical results with nonparametric specification test for data with cross-sectional dependence. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16978v1-abstract-full').style.display = 'none'; document.getElementById('2411.16978v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16158">arXiv:2411.16158</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16158">pdf</a>, <a href="https://arxiv.org/format/2411.16158">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> MixPE: Quantization and Hardware Co-design for Efficient LLM Inference </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Yu Zhang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+M">Mingzi Wang</a>, <a href="/search/?searchtype=author&amp;query=Zou%2C+L">Lancheng Zou</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wulong Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhen%2C+H">Hui-Ling Zhen</a>, <a href="/search/?searchtype=author&amp;query=Yuan%2C+M">Mingxuan Yuan</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+B">Bei Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16158v1-abstract-short" style="display: inline;"> Transformer-based large language models (LLMs) have achieved remarkable success as model sizes continue to grow, yet their deployment remains challenging due to significant computational and memory demands. 
Quantization has emerged as a promising solution, and state-of-the-art quantization algorithms for LLMs introduce the need for mixed-precision matrix multiplication (mpGEMM), where lower-precis&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16158v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16158v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16158v1-abstract-full" style="display: none;"> Transformer-based large language models (LLMs) have achieved remarkable success as model sizes continue to grow, yet their deployment remains challenging due to significant computational and memory demands. Quantization has emerged as a promising solution, and state-of-the-art quantization algorithms for LLMs introduce the need for mixed-precision matrix multiplication (mpGEMM), where lower-precision weights are multiplied with higher-precision activations. Despite its benefits, current hardware accelerators such as GPUs and TPUs lack native support for efficient mpGEMM, leading to inefficient dequantization operations in the main sequential loop. To address this limitation, we introduce MixPE, a specialized mixed-precision processing element designed for efficient low-bit quantization in LLM inference. MixPE leverages two key innovations to minimize dequantization overhead and unlock the full potential of low-bit quantization. First, recognizing that scale and zero point are shared within each quantization group, we propose performing dequantization after per-group mpGEMM, significantly reducing dequantization overhead. Second, instead of relying on conventional multipliers, MixPE utilizes efficient shift\&amp;add operations for multiplication, optimizing both computation and energy efficiency. 
Our experimental results demonstrate that MixPE surpasses the state-of-the-art quantization accelerators by $2.6\times$ speedup and $1.4\times$ energy reduction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16158v1-abstract-full').style.display = 'none'; document.getElementById('2411.16158v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16081">arXiv:2411.16081</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16081">pdf</a>, <a href="https://arxiv.org/format/2411.16081">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Exploring the Generalization Capabilities of AID-based Bi-level Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+C">Congliang Chen</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+L">Li Shen</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+Z">Zhiqiang Xu</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=Luo%2C+Z">Zhi-Quan Luo</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+P">Peilin Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16081v1-abstract-short" 
style="display: inline;"> Bi-level optimization has achieved considerable success in contemporary machine learning applications, especially for given proper hyperparameters. However, due to the two-level optimization structure, commonly, researchers focus on two types of bi-level optimization methods: approximate implicit differentiation (AID)-based and iterative differentiation (ITD)-based approaches. ITD-based methods ca&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16081v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16081v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16081v1-abstract-full" style="display: none;"> Bi-level optimization has achieved considerable success in contemporary machine learning applications, especially for given proper hyperparameters. However, due to the two-level optimization structure, commonly, researchers focus on two types of bi-level optimization methods: approximate implicit differentiation (AID)-based and iterative differentiation (ITD)-based approaches. ITD-based methods can be readily transformed into single-level optimization problems, facilitating the study of their generalization capabilities. In contrast, AID-based methods cannot be easily transformed similarly but must stay in the two-level structure, leaving their generalization properties enigmatic. In this paper, although the outer-level function is nonconvex, we ascertain the uniform stability of AID-based methods, which achieves similar results to a single-level nonconvex problem. We conduct a convergence analysis for a carefully chosen step size to maintain stability. Combining the convergence and stability results, we give the generalization ability of AID-based bi-level optimization methods. 
Furthermore, we carry out an ablation study of the parameters and assess the performance of these methods on real-world tasks. Our experimental results corroborate the theoretical findings, demonstrating the effectiveness and potential applications of these methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16081v1-abstract-full').style.display = 'none'; document.getElementById('2411.16081v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15752">arXiv:2411.15752</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15752">pdf</a>, <a href="https://arxiv.org/format/2411.15752">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of cross sections of $e^+e^-\to K^0_S K^0_S ψ(3686)$ from $\sqrt{s}=$ 4.682 to 4.951 GeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. 
Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. (642 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15752v1-abstract-short" style="display: inline;"> The process $e^+e^-\to K^0_S K^0_S ψ(3686)$ is studied by analyzing $e^+e^-$ collision data samples collected at eight center-of-mass energies ranging from 4.682 to 4.951 GeV with the BESIII detector operating at the BEPCII collider, corresponding to an integrated luminosity of $4.1~{\rm fb}^{-1}$. 
Observation of the $e^+e^-\to K^0_S K^0_S ψ(3686)$ process is found for the first time with a statis&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15752v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15752v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15752v1-abstract-full" style="display: none;"> The process $e^+e^-\to K^0_S K^0_S ψ(3686)$ is studied by analyzing $e^+e^-$ collision data samples collected at eight center-of-mass energies ranging from 4.682 to 4.951 GeV with the BESIII detector operating at the BEPCII collider, corresponding to an integrated luminosity of $4.1~{\rm fb}^{-1}$. Observation of the $e^+e^-\to K^0_S K^0_S ψ(3686)$ process is found for the first time with a statistical significance of $6.3σ$, and the cross sections at each center-of-mass energy are measured. The ratio of cross sections of $e^+e^-\to K_S^0 K_S^0 ψ(3686)$ relative to $e^+e^-\to K^+ K^- ψ(3686)$ is determined to be $\frac{σ(e^+e^-\to K_S^0 K_S^0 ψ(3686))}{σ(e^+e^-\to K^+ K^- ψ(3686))}=0.45 \pm 0.25$, which is consistent with the prediction based on isospin symmetry. The uncertainty includes both statistical and systematic contributions. Additionally, the $K_S^0ψ(3686)$ invariant mass distribution is found to be consistent with three-body phase space. The significance of a contribution beyond three-body phase space is only $0.8σ$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15752v1-abstract-full').style.display = 'none'; document.getElementById('2411.15752v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15729">arXiv:2411.15729</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15729">pdf</a>, <a href="https://arxiv.org/format/2411.15729">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> OccludeNet: A Causal Journey into Mixed-View Actor-Centric Video Action Recognition under Occlusions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+G">Guanyu Zhou</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenxuan Liu</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenxin Huang</a>, <a href="/search/?searchtype=author&amp;query=Jia%2C+X">Xuemei Jia</a>, <a href="/search/?searchtype=author&amp;query=Zhong%2C+X">Xian Zhong</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+C">Chia-Wen Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15729v1-abstract-short" style="display: inline;"> The lack of occlusion data in commonly used action recognition video datasets limits model robustness and impedes sustained performance improvements. We construct OccludeNet, a large-scale occluded video dataset that includes both real-world and synthetic occlusion scene videos under various natural environments. 
OccludeNet features dynamic tracking occlusion, static scene occlusion, and multi-vie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15729v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15729v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15729v1-abstract-full" style="display: none;"> The lack of occlusion data in commonly used action recognition video datasets limits model robustness and impedes sustained performance improvements. We construct OccludeNet, a large-scale occluded video dataset that includes both real-world and synthetic occlusion scene videos under various natural environments. OccludeNet features dynamic tracking occlusion, static scene occlusion, and multi-view interactive occlusion, addressing existing gaps in data. Our analysis reveals that occlusion impacts action classes differently, with actions involving low scene relevance and partial body visibility experiencing greater accuracy degradation. To overcome the limitations of current occlusion-focused approaches, we propose a structural causal model for occluded scenes and introduce the Causal Action Recognition (CAR) framework, which employs backdoor adjustment and counterfactual reasoning. This framework enhances key actor information, improving model robustness to occlusion. We anticipate that the challenges posed by OccludeNet will stimulate further exploration of causal relations in occlusion scenarios and encourage a reevaluation of class correlations, ultimately promoting sustainable performance improvements. The code and full dataset will be released soon. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15729v1-abstract-full').style.display = 'none'; document.getElementById('2411.15729v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15630">arXiv:2411.15630</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15630">pdf</a>, <a href="https://arxiv.org/format/2411.15630">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> A 400Gbit Ethernet core enabling High Data Rate Streaming from FPGAs to Servers and GPUs in Radio Astronomy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=Burnett%2C+M+C">Mitchell C. 
Burnett</a>, <a href="/search/?searchtype=author&amp;query=Werthimer%2C+D">Dan Werthimer</a>, <a href="/search/?searchtype=author&amp;query=Kocz%2C+J">Jonathon Kocz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15630v1-abstract-short" style="display: inline;"> The increased bandwidth coupled with the large numbers of antennas of several new radio telescope arrays has resulted in an exponential increase in the amount of data that needs to be recorded and processed. In many cases, it is necessary to process this data in real time, as the raw data volumes are too high to be recorded and stored. Due to the ability of graphics processing units (GPUs) to proc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15630v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15630v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15630v1-abstract-full" style="display: none;"> The increased bandwidth coupled with the large numbers of antennas of several new radio telescope arrays has resulted in an exponential increase in the amount of data that needs to be recorded and processed. In many cases, it is necessary to process this data in real time, as the raw data volumes are too high to be recorded and stored. Due to the ability of graphics processing units (GPUs) to process data in parallel, GPUs are increasingly used for data-intensive tasks. In most radio astronomy digital instrumentation (e.g. correlators for spectral imaging, beamforming, pulsar, fast radio burst and SETI searching), the processing power of modern GPUs is limited by the input/output data rate, not by the GPU&#39;s computation ability. 
Techniques for streaming ultra-high-rate data to GPUs, such as those described in this paper, reduce the number of GPUs and servers needed, and make significant reductions in the cost, power consumption, size, and complexity of GPU based radio astronomy backends. In this research, we developed and tested several different techniques to stream data from network interface cards (NICs) to GPUs. We also developed an open-source UDP/IPv4 400GbE wrapper for the AMD/Xilinx IP demonstrating high-speed data stream transfer from a field programmable gate array (FPGA) to GPU. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15630v1-abstract-full').style.display = 'none'; document.getElementById('2411.15630v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 29 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15441">arXiv:2411.15441</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15441">pdf</a>, <a href="https://arxiv.org/format/2411.15441">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Study of $\itΛ_{\it{b}}^\rm{0}$ and $\itΞ_{\it{b}}^\rm{0}$ decays to $\itΛ h^+h^{&#39;-}$ and evidence for $CP$ violation in $\itΛ_{\it{b}}^\rm{0}\to\itΛ K^+K^-$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. 
Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1129 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15441v1-abstract-short" style="display: inline;"> A study of $\itΛ_{\it{b}}^\rm{0}$ and $\itΞ_{\it{b}}^\rm{0}$ decays to $\itΛ h^{+} h^{\prime -}$ $(h^{(\prime)}=π, K)$ is performed using $pp$ collision data collected by the LHCb experiment during LHC Runs 1$-$2, corresponding to an integrated luminosity of $9~\rm{fb}^{-1}$. 
The branching fractions for these decays are measured using the $\itΛ_{\it{b}}^\rm{0}\to\itΛ_{\it{c}}^+(\to\itΛπ^+)π^-$ dec&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15441v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15441v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15441v1-abstract-full" style="display: none;"> A study of $\itΛ_{\it{b}}^\rm{0}$ and $\itΞ_{\it{b}}^\rm{0}$ decays to $\itΛ h^{+} h^{\prime -}$ $(h^{(\prime)}=π, K)$ is performed using $pp$ collision data collected by the LHCb experiment during LHC Runs 1$-$2, corresponding to an integrated luminosity of $9~\rm{fb}^{-1}$. The branching fractions for these decays are measured using the $\itΛ_{\it{b}}^\rm{0}\to\itΛ_{\it{c}}^+(\to\itΛπ^+)π^-$ decay as control channel. The decays $\itΛ_{\it{b}}^\rm{0}\to\itΛπ^+π^-$ and $\itΞ_{\it{b}}^\rm{0}\to\itΛK^-π^+$ are observed for the first time. For decay modes with sufficient signal yields, $CP$ asymmetries are measured in the full and localized regions of the final-state phase space. Evidence is found for $CP$ violation in the $\itΛ_{\it{b}}^\rm{0}\to\itΛK^+K^-$ decay, interpreted as originating primarily from an asymmetric $\itΛ_{\it{b}}^\rm{0} \to \it{N}^{*+} \it{K}^-$ decay amplitude. The measured $CP$ asymmetries for the other decays are compatible with zero. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15441v1-abstract-full').style.display = 'none'; document.getElementById('2411.15441v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with any supplementary material and additional information, are available at https://cern.ch/lhcbproject/Publications/p/LHCb-PAPER-2024-043.html (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-043, CERN-EP-2024-281 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14721">arXiv:2411.14721</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14721">pdf</a>, <a href="https://arxiv.org/format/2411.14721">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> MolReFlect: Towards In-Context Fine-grained Alignments between Molecules and Texts </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jiatong Li</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Yunqing Liu</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=Le%2C+J">Jingdi Le</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+D">Di Zhang</a>, <a href="/search/?searchtype=author&amp;query=Fan%2C+W">Wenqi Fan</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+D">Dongzhan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yuqiang Li</a>, <a 
href="/search/?searchtype=author&amp;query=Li%2C+Q">Qing Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14721v1-abstract-short" style="display: inline;"> Molecule discovery is a pivotal research field, impacting everything from the medicines we take to the materials we use. Recently, Large Language Models (LLMs) have been widely adopted in molecule understanding and generation, yet the alignments between molecules and their corresponding captions remain a significant challenge. Previous endeavours often treat the molecule as a general SMILES string&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14721v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14721v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14721v1-abstract-full" style="display: none;"> Molecule discovery is a pivotal research field, impacting everything from the medicines we take to the materials we use. Recently, Large Language Models (LLMs) have been widely adopted in molecule understanding and generation, yet the alignments between molecules and their corresponding captions remain a significant challenge. Previous endeavours often treat the molecule as a general SMILES string or molecular graph, neglecting the fine-grained alignments between the molecular sub-structures and the descriptive textual phrases, which are crucial for accurate and explainable predictions. In this case, we introduce MolReFlect, a novel teacher-student framework designed to contextually perform the molecule-caption alignments in a fine-grained way. 
Our approach initially leverages a larger teacher LLM to label the detailed alignments by directly extracting critical phrases from molecule captions or SMILES strings and implying them to corresponding sub-structures or characteristics. To refine these alignments, we propose In-Context Selective Reflection, which retrieves previous extraction results as context examples for teacher LLM to reflect and lets a smaller student LLM select from in-context reflection and previous extraction results. Finally, we enhance the learning process of the student LLM through Chain-of-Thought In-Context Molecule Tuning, integrating the fine-grained alignments and the reasoning processes within the Chain-of-Thought format. Our experimental results demonstrate that MolReFlect enables LLMs like Mistral-7B to significantly outperform the previous baselines, achieving SOTA performance on the ChEBI-20 dataset. This advancement not only enhances the generative capabilities of LLMs in the molecule-caption translation task, but also contributes to a more explainable framework. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14721v1-abstract-full').style.display = 'none'; document.getElementById('2411.14721v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 12 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14502">arXiv:2411.14502</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14502">pdf</a>, <a href="https://arxiv.org/format/2411.14502">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Global Challenge for Safe and Secure LLMs Track 1 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Jia%2C+X">Xiaojun Jia</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+Y">Yihao Huang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Yang Liu</a>, <a href="/search/?searchtype=author&amp;query=Tan%2C+P+Y">Peng Yan Tan</a>, <a href="/search/?searchtype=author&amp;query=Yau%2C+W+K">Weng Kuan Yau</a>, <a href="/search/?searchtype=author&amp;query=Mak%2C+M">Mun-Thye Mak</a>, <a href="/search/?searchtype=author&amp;query=Sim%2C+X+M">Xin Ming Sim</a>, <a href="/search/?searchtype=author&amp;query=Ng%2C+W+S">Wee Siong Ng</a>, <a href="/search/?searchtype=author&amp;query=Ng%2C+S+K">See Kiong Ng</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+H">Hanqing Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+L">Lifeng Zhou</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+H">Huanqian Yan</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+X">Xiaobing Sun</a>, <a 
href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+L">Long Wang</a>, <a href="/search/?searchtype=author&amp;query=Qian%2C+Y">Yiming Qian</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Yong Liu</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+J">Junxiao Yang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Z">Zhexin Zhang</a>, <a href="/search/?searchtype=author&amp;query=Lei%2C+L">Leqi Lei</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+R">Renmiao Chen</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+Y">Yida Lu</a>, <a href="/search/?searchtype=author&amp;query=Cui%2C+S">Shiyao Cui</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Z">Zizhou Wang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+S">Shaohua Li</a> , et al. (5 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14502v1-abstract-short" style="display: inline;"> This paper introduces the Global Challenge for Safe and Secure Large Language Models (LLMs), a pioneering initiative organized by AI Singapore (AISG) and the CyberSG R&amp;D Programme Office (CRPO) to foster the development of advanced defense mechanisms against automated jailbreaking attacks. 
With the increasing integration of LLMs in critical sectors such as healthcare, finance, and public administr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14502v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14502v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14502v1-abstract-full" style="display: none;"> This paper introduces the Global Challenge for Safe and Secure Large Language Models (LLMs), a pioneering initiative organized by AI Singapore (AISG) and the CyberSG R&amp;D Programme Office (CRPO) to foster the development of advanced defense mechanisms against automated jailbreaking attacks. With the increasing integration of LLMs in critical sectors such as healthcare, finance, and public administration, ensuring these models are resilient to adversarial attacks is vital for preventing misuse and upholding ethical standards. This competition focused on two distinct tracks designed to evaluate and enhance the robustness of LLM security frameworks. Track 1 tasked participants with developing automated methods to probe LLM vulnerabilities by eliciting undesirable responses, effectively testing the limits of existing safety protocols within LLMs. Participants were challenged to devise techniques that could bypass content safeguards across a diverse array of scenarios, from offensive language to misinformation and illegal activities. Through this process, Track 1 aimed to deepen the understanding of LLM vulnerabilities and provide insights for creating more resilient models. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14502v1-abstract-full').style.display = 'none'; document.getElementById('2411.14502v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14461">arXiv:2411.14461</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14461">pdf</a>, <a href="https://arxiv.org/format/2411.14461">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Towards Next-Generation Medical Agent: How o1 is Reshaping Decision-Making in Medical Scenarios </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xu%2C+S">Shaochen Xu</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yifan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Z">Zhengliang Liu</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Z">Zihao Wu</a>, <a href="/search/?searchtype=author&amp;query=Zhong%2C+T">Tianyang Zhong</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+H">Huaqin Zhao</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yiwei Li</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+H">Hanqi Jiang</a>, <a href="/search/?searchtype=author&amp;query=Pan%2C+Y">Yi Pan</a>, <a 
href="/search/?searchtype=author&amp;query=Chen%2C+J">Junhao Chen</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+J">Jin Lu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+W">Wei Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+T">Tuo Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Lu Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+D">Dajiang Zhu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+X">Xiang Li</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Q">Quanzheng Li</a>, <a href="/search/?searchtype=author&amp;query=Sikora%2C+A">Andrea Sikora</a>, <a href="/search/?searchtype=author&amp;query=Zhai%2C+X">Xiaoming Zhai</a>, <a href="/search/?searchtype=author&amp;query=Xiang%2C+Z">Zhen Xiang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+T">Tianming Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14461v1-abstract-short" style="display: inline;"> Artificial Intelligence (AI) has become essential in modern healthcare, with large language models (LLMs) offering promising advances in clinical decision-making. 
Traditional model-based approaches, including those leveraging in-context demonstrations and those with specialized medical fine-tuning, have demonstrated strong performance in medical language processing but struggle with real-time adap&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14461v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14461v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14461v1-abstract-full" style="display: none;"> Artificial Intelligence (AI) has become essential in modern healthcare, with large language models (LLMs) offering promising advances in clinical decision-making. Traditional model-based approaches, including those leveraging in-context demonstrations and those with specialized medical fine-tuning, have demonstrated strong performance in medical language processing but struggle with real-time adaptability, multi-step reasoning, and handling complex medical tasks. Agent-based AI systems address these limitations by incorporating reasoning traces, tool selection based on context, knowledge retrieval, and both short- and long-term memory. These additional features enable the medical AI agent to handle complex medical scenarios where decision-making should be built on real-time interaction with the environment. Therefore, unlike conventional model-based approaches that treat medical queries as isolated questions, medical AI agents approach them as complex tasks and behave more like human doctors. In this paper, we study the choice of the backbone LLM for medical AI agents, which is the foundation for the agent&#39;s overall reasoning and action generation. 
In particular, we consider the emergent o1 model and examine its impact on agents&#39; reasoning, tool-use adaptability, and real-time information retrieval across diverse clinical scenarios, including high-stakes settings such as intensive care units (ICUs). Our findings demonstrate o1&#39;s ability to enhance diagnostic accuracy and consistency, paving the way for smarter, more responsive AI tools that support better patient outcomes and decision-making efficacy in clinical practice. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14461v1-abstract-full').style.display = 'none'; document.getElementById('2411.14461v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14347">arXiv:2411.14347</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14347">pdf</a>, <a href="https://arxiv.org/format/2411.14347">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DINO-X: A Unified Vision Model for Open-World Object Detection and Understanding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ren%2C+T">Tianhe Ren</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yihao Chen</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+Q">Qing Jiang</a>, <a href="/search/?searchtype=author&amp;query=Zeng%2C+Z">Zhaoyang Zeng</a>, <a href="/search/?searchtype=author&amp;query=Xiong%2C+Y">Yuda Xiong</a>, <a 
href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenlong Liu</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+Z">Zhengyu Ma</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+J">Junyi Shen</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+Y">Yuan Gao</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+X">Xiaoke Jiang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xingyu Chen</a>, <a href="/search/?searchtype=author&amp;query=Song%2C+Z">Zhuheng Song</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Yuhong Zhang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+H">Hongjie Huang</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+H">Han Gao</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+S">Shilong Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+H">Hao Zhang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+F">Feng Li</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+K">Kent Yu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Lei Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14347v1-abstract-short" style="display: inline;"> In this paper, we introduce DINO-X, which is a unified object-centric vision model developed by IDEA Research with the best open-world object detection performance to date. DINO-X employs the same Transformer-based encoder-decoder architecture as Grounding DINO 1.5 to pursue an object-level representation for open-world object understanding. 
To make long-tailed object detection easy, DINO-X extend&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14347v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14347v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14347v1-abstract-full" style="display: none;"> In this paper, we introduce DINO-X, which is a unified object-centric vision model developed by IDEA Research with the best open-world object detection performance to date. DINO-X employs the same Transformer-based encoder-decoder architecture as Grounding DINO 1.5 to pursue an object-level representation for open-world object understanding. To make long-tailed object detection easy, DINO-X extends its input options to support text prompt, visual prompt, and customized prompt. With such flexible prompt options, we develop a universal object prompt to support prompt-free open-world detection, making it possible to detect anything in an image without requiring users to provide any prompt. To enhance the model&#39;s core grounding capability, we have constructed a large-scale dataset with over 100 million high-quality grounding samples, referred to as Grounding-100M, for advancing the model&#39;s open-vocabulary detection performance. Pre-training on such a large-scale grounding dataset leads to a foundational object-level representation, which enables DINO-X to integrate multiple perception heads to simultaneously support multiple object perception and understanding tasks, including detection, segmentation, pose estimation, object captioning, object-based QA, etc. Experimental results demonstrate the superior performance of DINO-X. Specifically, the DINO-X Pro model achieves 56.0 AP, 59.8 AP, and 52.4 AP on the COCO, LVIS-minival, and LVIS-val zero-shot object detection benchmarks, respectively. 
Notably, it scores 63.3 AP and 56.5 AP on the rare classes of LVIS-minival and LVIS-val benchmarks, both improving the previous SOTA performance by 5.8 AP. Such a result underscores its significantly improved capacity for recognizing long-tailed objects. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14347v1-abstract-full').style.display = 'none'; document.getElementById('2411.14347v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Technical Report</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13856">arXiv:2411.13856</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13856">pdf</a>, <a href="https://arxiv.org/format/2411.13856">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> A Data-Driven Modeling and Motion Control of Heavy-Load Hydraulic Manipulators via Reversible Transformation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ma%2C+D">Dexian Ma</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Yirong Liu</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenbo Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+B">Bo Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2411.13856v1-abstract-short" style="display: inline;"> This work proposes a data-driven modeling and the corresponding hybrid motion control framework for unmanned and automated operation of industrial heavy-load hydraulic manipulator. Rather than the direct use of a neural network black box, we construct a reversible nonlinear model by using multilayer perceptron to approximate dynamics in the physical integrator chain system after reversible transfo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13856v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13856v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13856v1-abstract-full" style="display: none;"> This work proposes a data-driven modeling and the corresponding hybrid motion control framework for unmanned and automated operation of industrial heavy-load hydraulic manipulator. Rather than the direct use of a neural network black box, we construct a reversible nonlinear model by using multilayer perceptron to approximate dynamics in the physical integrator chain system after reversible transformations. The reversible nonlinear model is trained offline using supervised learning techniques, and the data are obtained from simulations or experiments. Entire hybrid motion control framework consists of the model inversion controller that compensates for the nonlinear dynamics and proportional-derivative controller that enhances the robustness. The stability is proved with Lyapunov theory. Co-simulation and Experiments show the effectiveness of proposed modeling and hybrid control framework. With a commercial 39-ton class hydraulic excavator for motion control tasks, the root mean square error of trajectory tracking error decreases by at least 50\% compared to traditional control methods. 
In addition, by analyzing the system model, the proposed framework can be rapidly applied to different control plants. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13856v1-abstract-full').style.display = 'none'; document.getElementById('2411.13856v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13331">arXiv:2411.13331</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13331">pdf</a>, <a href="https://arxiv.org/format/2411.13331">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> </div> </div> <p class="title is-5 mathjax"> Versatile photonic frequency synthetic dimensions using a single Mach-Zehnder-interferometer-assisted device on thin-film lithium niobate </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+Z">Zhao-An Wang</a>, <a href="/search/?searchtype=author&amp;query=Zeng%2C+X">Xiao-Dong Zeng</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yi-Tao Wang</a>, <a href="/search/?searchtype=author&amp;query=Ren%2C+J">Jia-Ming Ren</a>, <a href="/search/?searchtype=author&amp;query=Ao%2C+C">Chun Ao</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Z">Zhi-Peng Li</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+N">Nai-Jie Guo</a>, <a 
href="/search/?searchtype=author&amp;query=Xie%2C+L">Lin-Ke Xie</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+J">Jun-You Liu</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+Y">Yu-Hang Ma</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Y">Ya-Qi Wu</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+S">Shuang Wang</a>, <a href="/search/?searchtype=author&amp;query=Tang%2C+J">Jian-Shun Tang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+C">Chuan-Feng Li</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+G">Guang-Can Guo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13331v1-abstract-short" style="display: inline;"> Investigating physical models with photonic synthetic dimensions has been generating great interest in vast fields of science. The rapid developing thin-film lithium niobate (TFLN) platform, for its numerous advantages including high electro-optic coefficient and scalability, is well compatible with the realization of synthetic dimensions in the frequency together with spatial domain. While coupli&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13331v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13331v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13331v1-abstract-full" style="display: none;"> Investigating physical models with photonic synthetic dimensions has been generating great interest in vast fields of science. The rapid developing thin-film lithium niobate (TFLN) platform, for its numerous advantages including high electro-optic coefficient and scalability, is well compatible with the realization of synthetic dimensions in the frequency together with spatial domain. 
While coupling resonators with fixed beam splitters is a common experimental approach, it often lacks tunability and limits coupling between adjacent lattices to sites occupying the same frequency domain positions. Here, on the contrary, we conceive the resonator arrays connected by electro-optic tunable Mach-Zehnder interferometers in our configuration instead of fixed beam splitters. By applying bias voltage and RF modulation on the interferometers, our design extends such coupling to long-range scenario and allows for continuous tuning on each coupling strength and synthetic effective magnetic flux. Therefore, our design enriches controllable coupling types that are essential for building programmable lattice networks and significantly increases versatility. As the example, we experimentally fabricate a two-resonator prototype on the TFLN platform, and on this single chip we realize well-known models including tight-binding lattices, topological Hall ladder and Creutz ladder. We directly observe the band structures in the quasi-momentum space and important phenomena such as spin-momentum locking and the Aharonov-Bohm cage effect. These results demonstrate the potential for convenient simulations of more complex models in our configuration. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13331v1-abstract-full').style.display = 'none'; document.getElementById('2411.13331v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13162">arXiv:2411.13162</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13162">pdf</a>, <a href="https://arxiv.org/format/2411.13162">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> </div> </div> <p class="title is-5 mathjax"> IC Mechanisms for Risk-Averse Advertisers in the Online Advertising System </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+B">Bingzhe Wang</a>, <a href="/search/?searchtype=author&amp;query=Qian%2C+R">Ruohan Qian</a>, <a href="/search/?searchtype=author&amp;query=Dou%2C+Y">Yuejia Dou</a>, <a href="/search/?searchtype=author&amp;query=Qi%2C+Q">Qi Qi</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+B">Bo Shen</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+C">Changyuan Li</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Yixuan Zhang</a>, <a href="/search/?searchtype=author&amp;query=Su%2C+Y">Yixin Su</a>, <a href="/search/?searchtype=author&amp;query=Yuan%2C+X">Xin Yuan</a>, <a href="/search/?searchtype=author&amp;query=liu%2C+W">Wenqiang liu</a>, <a href="/search/?searchtype=author&amp;query=Zou%2C+B">Bin Zou</a>, <a href="/search/?searchtype=author&amp;query=Yi%2C+W">Wen Yi</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+Z">Zhi Guo</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+S">Shuanglong Li</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+L">Liu Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13162v1-abstract-short" style="display: inline;"> The autobidding system generates huge revenue for advertising 
platforms, garnering substantial research attention. Existing studies in autobidding systems focus on designing Autobidding Incentive Compatible (AIC) mechanisms, where the mechanism is Incentive Compatible (IC) under ex ante expectations. However, upon deploying AIC mechanisms in advertising platforms, we observe a notable deviation be&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13162v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13162v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13162v1-abstract-full" style="display: none;"> The autobidding system generates huge revenue for advertising platforms, garnering substantial research attention. Existing studies in autobidding systems focus on designing Autobidding Incentive Compatible (AIC) mechanisms, where the mechanism is Incentive Compatible (IC) under ex ante expectations. However, upon deploying AIC mechanisms in advertising platforms, we observe a notable deviation between the actual auction outcomes and these expectations during runtime, particularly in the scene with few clicks (sparse-click). This discrepancy undermines truthful bidding among advertisers in AIC mechanisms, especially for risk-averse advertisers who are averse to outcomes that do not align with the expectations. To address this issue, we propose a mechanism, Decoupled First-Price Auction (DFP), that retains its IC property even during runtime. DFP dynamically adjusts the payment based on real-time user conversion outcomes, ensuring that advertisers&#39; realized utilities closely approximate their expected utilities during runtime. To realize the payment mechanism of DFP, we propose a PPO-based RL algorithm, with a meticulously crafted reward function. This algorithm dynamically adjusts the payment to fit DFP mechanism. 
We conduct extensive experiments leveraging real-world data to validate our findings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13162v1-abstract-full').style.display = 'none'; document.getElementById('2411.13162v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12782">arXiv:2411.12782</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12782">pdf</a>, <a href="https://arxiv.org/format/2411.12782">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Mesoscale and Nanoscale Physics">cond-mat.mes-hall</span> </div> </div> <p class="title is-5 mathjax"> Multiplexed readout of ultrasensitive bolometers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Singh%2C+P">Priyank Singh</a>, <a href="/search/?searchtype=author&amp;query=Gunyh%C3%B3%2C+A">András Gunyhó</a>, <a href="/search/?searchtype=author&amp;query=Suominen%2C+H">Heikki Suominen</a>, <a href="/search/?searchtype=author&amp;query=Catto%2C+G">Giacomo Catto</a>, <a href="/search/?searchtype=author&amp;query=Blanchet%2C+F">Florian Blanchet</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Q">Qi-Ming Chen</a>, <a href="/search/?searchtype=author&amp;query=Alizadeh%2C+A">Arman Alizadeh</a>, <a href="/search/?searchtype=author&amp;query=Ker%C3%A4nen%2C+A">Aarne Keränen</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+J">Jian Ma</a>,
<a href="/search/?searchtype=author&amp;query=M%C3%B6rstedt%2C+T">Timm Mörstedt</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=M%C3%B6ttonen%2C+M">Mikko Möttonen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12782v1-abstract-short" style="display: inline;"> Recently, ultrasensitive calorimeters have been proposed as a resource-efficient solution for multiplexed qubit readout in superconducting large-scale quantum processors. However, experiments demonstrating frequency multiplexing of these superconductor-normal conductor-superconductor (SNS) sensors are coarse. To this end, we present the design, fabrication, and operation of three SNS sensors with&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12782v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12782v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12782v1-abstract-full" style="display: none;"> Recently, ultrasensitive calorimeters have been proposed as a resource-efficient solution for multiplexed qubit readout in superconducting large-scale quantum processors. However, experiments demonstrating frequency multiplexing of these superconductor-normal conductor-superconductor (SNS) sensors are coarse. To this end, we present the design, fabrication, and operation of three SNS sensors with frequency-multiplexed input and probe circuits, all on a single chip. These devices have their probe frequencies in the range \SI{150}{\mega\hertz} -- \SI{200}{\mega\hertz}, which is well detuned from the heater frequencies of \SI{4.4}{\giga\hertz} -- \SI{7.6}{\giga\hertz} compatible with typical readout frequencies of superconducting qubits.
Importantly, we show on-demand triggering of both individual and multiple low-noise SNS bolometers with very low cross talk. These experiments pave the way for multiplexed bolometric characterization and calorimetric readout of multiple qubits, a promising step in minimizing related resources such as the number of readout lines and microwave isolators in large-scale superconducting quantum computers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12782v1-abstract-full').style.display = 'none'; document.getElementById('2411.12782v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12185">arXiv:2411.12185</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12185">pdf</a>, <a href="https://arxiv.org/format/2411.12185">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> LiV-GS: LiDAR-Vision Integration for 3D Gaussian Splatting SLAM in Outdoor Environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xiao%2C+R">Renxiang Xiao</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yushuai Chen</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+L">Liang Hu</a> </p> <p class="abstract 
mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12185v1-abstract-short" style="display: inline;"> We present LiV-GS, a LiDAR-visual SLAM system in outdoor environments that leverages 3D Gaussian as a differentiable spatial representation. Notably, LiV-GS is the first method that directly aligns discrete and sparse LiDAR data with continuous differentiable Gaussian maps in large-scale outdoor scenes, overcoming the limitation of fixed resolution in traditional LiDAR mapping. The system aligns p&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12185v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12185v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12185v1-abstract-full" style="display: none;"> We present LiV-GS, a LiDAR-visual SLAM system in outdoor environments that leverages 3D Gaussian as a differentiable spatial representation. Notably, LiV-GS is the first method that directly aligns discrete and sparse LiDAR data with continuous differentiable Gaussian maps in large-scale outdoor scenes, overcoming the limitation of fixed resolution in traditional LiDAR mapping. The system aligns point clouds with Gaussian maps using shared covariance attributes for front-end tracking and integrates the normal orientation into the loss function to refines the Gaussian map. To reliably and stably update Gaussians outside the LiDAR field of view, we introduce a novel conditional Gaussian constraint that aligns these Gaussians closely with the nearest reliable ones. The targeted adjustment enables LiV-GS to achieve fast and accurate mapping with novel view synthesis at a rate of 7.98 FPS. Extensive comparative experiments demonstrate LiV-GS&#39;s superior performance in SLAM, image rendering and mapping. 
The successful cross-modal radar-LiDAR localization highlights the potential of LiV-GS for applications in cross-modal semantic positioning and object segmentation with Gaussian maps. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12185v1-abstract-full').style.display = 'none'; document.getElementById('2411.12185v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12178">arXiv:2411.12178</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12178">pdf</a>, <a href="https://arxiv.org/format/2411.12178">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> First evidence for direct CP violation in beauty to charmonium decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B.
Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. 
(1127 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12178v2-abstract-short" style="display: inline;"> The $C\!P$ asymmetry and branching fraction of the CKM-suppressed decay $B^+\!\to J\mskip -3mu/\mskip -2muψ\,π^+$ are precisely measured relative to the favoured decay $B^+\!\to J\mskip -3mu/\mskip -2muψ\,K^+$, using a sample of proton-proton collision data corresponding to an integrated luminosity of $5.4~\mathrm{fb}^{-1}$ recorded at center-of-mass energy of $13~\mathrm{TeV}$ during 2016--2018.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12178v2-abstract-full').style.display = 'inline'; document.getElementById('2411.12178v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12178v2-abstract-full" style="display: none;"> The $C\!P$ asymmetry and branching fraction of the CKM-suppressed decay $B^+\!\to J\mskip -3mu/\mskip -2muψ\,π^+$ are precisely measured relative to the favoured decay $B^+\!\to J\mskip -3mu/\mskip -2muψ\,K^+$, using a sample of proton-proton collision data corresponding to an integrated luminosity of $5.4~\mathrm{fb}^{-1}$ recorded at center-of-mass energy of $13~\mathrm{TeV}$ during 2016--2018. The results of the $C\!P$ asymmetry difference and branching fraction ratio are \begin{align*} Δ\mathcal{A}^{C\!P} &amp;\equiv \mathcal{A}^{C\!P}(B^+ \to J\mskip -3mu/\mskip -2muψ\,π^+) - \mathcal{A}^{C\!P}(B^+ \to J\mskip -3mu/\mskip -2muψ\,K^+) = (1.29 \pm 0.49 \pm 0.08) \times 10^{-2}, \end{align*} \begin{equation*} \mathcal{R}_{π/K} \equiv \frac{\mathcal{B}(B^+ \!\to J\mskip -3mu/\mskip -2muψ\,π^+)}{\mathcal{B}(B^+ \!\to J\mskip -3mu/\mskip -2muψ\,K^+)} = (3.852 \pm 0.022 \pm 0.018) \times 10^{-2}.
\end{equation*} where the first uncertainties are statistical and the second systematic. A combination with previous LHCb results based on data collected at $7$ and $8~\mathrm{TeV}$ in 2011 and 2012 yields $Δ\mathcal{A}^{C\!P} = (1.42 \pm 0.43 \pm 0.08) \times 10^{-2}$ and $\mathcal{R}_{π/K} = (3.846 \pm 0.018 \pm 0.018) \times 10^{-2}$. The combined $Δ\mathcal{A}^{C\!P}$ value deviates from zero by 3.2 standard deviations, providing the first evidence for direct $C\!P$ violation in the amplitudes of beauty decays to charmonium final states. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12178v2-abstract-full').style.display = 'none'; document.getElementById('2411.12178v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 2 figures, no conference or journal information All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/1623/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-031 CERN-EP-2024-286 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12156">arXiv:2411.12156</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12156">pdf</a>, <a href="https://arxiv.org/format/2411.12156">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> HNCSE: Advancing Sentence Embeddings via Hybrid Contrastive Learning with Hard Negatives </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenxiao Liu</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Z">Zihong Yang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+C">Chaozhuo Li</a>, <a href="/search/?searchtype=author&amp;query=Hong%2C+Z">Zijin Hong</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+J">Jianfeng Ma</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Z">Zhiquan Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Litian Zhang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+F">Feiran Huang</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12156v1-abstract-short" style="display: inline;"> Unsupervised sentence representation learning remains a critical challenge in modern natural language processing (NLP) research. Recently, contrastive learning techniques have achieved significant success in addressing this issue by effectively capturing textual semantics. Many such approaches prioritize the optimization using negative samples. In fields such as computer vision, hard negative samp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12156v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12156v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12156v1-abstract-full" style="display: none;"> Unsupervised sentence representation learning remains a critical challenge in modern natural language processing (NLP) research. Recently, contrastive learning techniques have achieved significant success in addressing this issue by effectively capturing textual semantics. Many such approaches prioritize the optimization using negative samples. In fields such as computer vision, hard negative samples (samples that are close to the decision boundary and thus more difficult to distinguish) have been shown to enhance representation learning. However, adapting hard negatives to contrastive sentence learning is complex due to the intricate syntactic and semantic details of text. To address this problem, we propose HNCSE, a novel contrastive learning framework that extends the leading SimCSE approach. The hallmark of HNCSE is its innovative use of hard negative samples to enhance the learning of both positive and negative samples, thereby achieving a deeper semantic understanding. 
Empirical tests on semantic textual similarity and transfer task datasets validate the superiority of HNCSE. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12156v1-abstract-full').style.display = 'none'; document.getElementById('2411.12156v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11648">arXiv:2411.11648</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11648">pdf</a>, <a href="https://arxiv.org/ps/2411.11648">ps</a>, <a href="https://arxiv.org/format/2411.11648">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> </div> </div> <p class="title is-5 mathjax"> Evidence for Two Excited $Ω^{-}$ Hyperons </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. 
Amoroso</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a>, <a href="/search/?searchtype=author&amp;query=Brueggemann%2C+A">A. Brueggemann</a> , et al. (650 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11648v1-abstract-short" style="display: inline;"> Using $e^+e^-$ collision data corresponding to an integrated luminosity of 19 fb$^{-1}$ collected by the BESIII detector at center-of-mass energies ranging from 4.13 to 4.70 GeV, we report the first evidence for a new excited $Ω^{-}$ hyperon, the $Ω^*(2109)^{-}$, through the process $e^+ e^- \to Ω^*(2109)^{-} \barΩ^{+} +c.c.$ with a significance of 3.7 $σ$. 
The mass and width of $Ω^*(2109)^{-}$ ar&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11648v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11648v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11648v1-abstract-full" style="display: none;"> Using $e^+e^-$ collision data corresponding to an integrated luminosity of 19 fb$^{-1}$ collected by the BESIII detector at center-of-mass energies ranging from 4.13 to 4.70 GeV, we report the first evidence for a new excited $Ω^{-}$ hyperon, the $Ω^*(2109)^{-}$, through the process $e^+ e^- \to Ω^*(2109)^{-} \barΩ^{+} +c.c.$ with a significance of 3.7 $σ$. The mass and width of $Ω^*(2109)^{-}$ are measured to be $2108.8 \pm 5.5_{\rm stat} \pm 1.5_{\rm syst} {\rm MeV}/c^{2}$ and $21.6 \pm 17.7_{\rm stat} \pm 9.4_{\rm syst} {\rm MeV}$, respectively. We also present evidence for production of the $Ω^*(2012)^{-}$ in the process $e^+ e^- \to Ω^*(2012)^{-} \barΩ^{+} +c.c.$ with a significance of 3.7 $σ$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11648v1-abstract-full').style.display = 'none'; document.getElementById('2411.11648v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11409">arXiv:2411.11409</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11409">pdf</a>, <a href="https://arxiv.org/format/2411.11409">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> IKEA Manuals at Work: 4D Grounding of Assembly Instructions on Internet Videos </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Yunong Liu</a>, <a href="/search/?searchtype=author&amp;query=Eyzaguirre%2C+C">Cristobal Eyzaguirre</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+M">Manling Li</a>, <a href="/search/?searchtype=author&amp;query=Khanna%2C+S">Shubh Khanna</a>, <a href="/search/?searchtype=author&amp;query=Niebles%2C+J+C">Juan Carlos Niebles</a>, <a href="/search/?searchtype=author&amp;query=Ravi%2C+V">Vineeth Ravi</a>, <a href="/search/?searchtype=author&amp;query=Mishra%2C+S">Saumitra Mishra</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Weiyu Liu</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+J">Jiajun Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2411.11409v1-abstract-short" style="display: inline;"> Shape assembly is a ubiquitous task in daily life, integral for constructing complex 3D structures like IKEA furniture. While significant progress has been made in developing autonomous agents for shape assembly, existing datasets have not yet tackled the 4D grounding of assembly instructions in videos, essential for a holistic understanding of assembly in 3D space over time. We introduce IKEA Vid&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11409v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11409v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11409v1-abstract-full" style="display: none;"> Shape assembly is a ubiquitous task in daily life, integral for constructing complex 3D structures like IKEA furniture. While significant progress has been made in developing autonomous agents for shape assembly, existing datasets have not yet tackled the 4D grounding of assembly instructions in videos, essential for a holistic understanding of assembly in 3D space over time. We introduce IKEA Video Manuals, a dataset that features 3D models of furniture parts, instructional manuals, assembly videos from the Internet, and most importantly, annotations of dense spatio-temporal alignments between these data modalities. To demonstrate the utility of IKEA Video Manuals, we present five applications essential for shape assembly: assembly plan generation, part-conditioned segmentation, part-conditioned pose estimation, video object segmentation, and furniture assembly based on instructional video manuals. For each application, we provide evaluation metrics and baseline methods. 
Through experiments on our annotated data, we highlight many challenges in grounding assembly instructions in videos to improve shape assembly, including handling occlusions, varying viewpoints, and extended assembly sequences. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11409v1-abstract-full').style.display = 'none'; document.getElementById('2411.11409v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS 2024 Datasets and Benchmarks Track</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10742">arXiv:2411.10742</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10742">pdf</a>, <a href="https://arxiv.org/format/2411.10742">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> It Takes Two: Accurate Gait Recognition in the Wild via Cross-granularity Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zheng%2C+J">Jinkai Zheng</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+X">Xinchen Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+B">Boyue Zhang</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+C">Chenggang Yan</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+J">Jiyong Zhang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wu 
Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Yongdong Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10742v1-abstract-short" style="display: inline;"> Existing studies for gait recognition primarily utilized sequences of either binary silhouette or human parsing to encode the shapes and dynamics of persons during walking. Silhouettes exhibit accurate segmentation quality and robustness to environmental variations, but their low information entropy may result in sub-optimal performance. In contrast, human parsing provides fine-grained part segmen&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10742v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10742v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10742v1-abstract-full" style="display: none;"> Existing studies for gait recognition primarily utilized sequences of either binary silhouette or human parsing to encode the shapes and dynamics of persons during walking. Silhouettes exhibit accurate segmentation quality and robustness to environmental variations, but their low information entropy may result in sub-optimal performance. In contrast, human parsing provides fine-grained part segmentation with higher information entropy, but the segmentation quality may deteriorate due to the complex environments. To discover the advantages of silhouette and parsing and overcome their limitations, this paper proposes a novel cross-granularity alignment gait recognition method, named XGait, to unleash the power of gait representations of different granularity. 
To achieve this goal, the XGait first contains two branches of backbone encoders to map the silhouette sequences and the parsing sequences into two latent spaces, respectively. Moreover, to explore the complementary knowledge across the features of two representations, we design the Global Cross-granularity Module (GCM) and the Part Cross-granularity Module (PCM) after the two encoders. In particular, the GCM aims to enhance the quality of parsing features by leveraging global features from silhouettes, while the PCM aligns the dynamics of human parts between silhouette and parsing features using the high information entropy in parsing sequences. In addition, to effectively guide the alignment of two representations with different granularity at the part level, an elaborate-designed learnable division mechanism is proposed for the parsing features. Comprehensive experiments on two large-scale gait datasets not only show the superior performance of XGait with the Rank-1 accuracy of 80.5% on Gait3D and 88.3% CCPG but also reflect the robustness of the learned features even under challenging conditions like occlusions and cloth changes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10742v1-abstract-full').style.display = 'none'; document.getElementById('2411.10742v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 9 figures; Accepted by ACM MM 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10702">arXiv:2411.10702</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10702">pdf</a>, <a href="https://arxiv.org/format/2411.10702">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Wireless Resource Allocation with Collaborative Distributed and Centralized DRL under Control Channel Attacks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+K">Ke Wang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wanchun Liu</a>, <a href="/search/?searchtype=author&amp;query=Lim%2C+T+J">Teng Joon Lim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10702v1-abstract-short" style="display: inline;"> In this paper, we consider a wireless resource allocation problem in a cyber-physical system (CPS) where the control channel, carrying resource allocation commands, is subjected to denial-of-service (DoS) attacks. We propose a novel concept of collaborative distributed and centralized (CDC) resource allocation to effectively mitigate the impact of these attacks. 
To optimize the CDC resource alloca&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10702v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10702v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10702v1-abstract-full" style="display: none;"> In this paper, we consider a wireless resource allocation problem in a cyber-physical system (CPS) where the control channel, carrying resource allocation commands, is subjected to denial-of-service (DoS) attacks. We propose a novel concept of collaborative distributed and centralized (CDC) resource allocation to effectively mitigate the impact of these attacks. To optimize the CDC resource allocation policy, we develop a new CDC-deep reinforcement learning (DRL) algorithm, whereas existing DRL frameworks only formulate either centralized or distributed decision-making problems. Simulation results demonstrate that the CDC-DRL algorithm significantly outperforms state-of-the-art DRL benchmarks, showcasing its ability to address resource allocation problems in large-scale CPSs under control channel attacks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10702v1-abstract-full').style.display = 'none'; document.getElementById('2411.10702v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work has been submitted to the IEEE for possible publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10696">arXiv:2411.10696</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10696">pdf</a>, <a href="https://arxiv.org/format/2411.10696">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> HELENE: Hessian Layer-wise Clipping and Gradient Annealing for Accelerating Fine-tuning LLM with Zeroth-order Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhao%2C+H">Huaqin Zhao</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jiaxi Li</a>, <a href="/search/?searchtype=author&amp;query=Pan%2C+Y">Yi Pan</a>, <a href="/search/?searchtype=author&amp;query=Liang%2C+S">Shizhe Liang</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+X">Xiaofeng Yang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+X">Xiang Li</a>, <a href="/search/?searchtype=author&amp;query=Dou%2C+F">Fei Dou</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+T">Tianming Liu</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+J">Jin Lu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10696v1-abstract-short" style="display: inline;"> Fine-tuning large language models (LLMs) poses 
significant memory challenges, as the back-propagation process demands extensive resources, especially with growing model sizes. Recent work, MeZO, addresses this issue using a zeroth-order (ZO) optimization method, which reduces memory consumption by matching the usage to the inference phase. However, MeZO experiences slow convergence due to varying&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10696v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10696v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10696v1-abstract-full" style="display: none;"> Fine-tuning large language models (LLMs) poses significant memory challenges, as the back-propagation process demands extensive resources, especially with growing model sizes. Recent work, MeZO, addresses this issue using a zeroth-order (ZO) optimization method, which reduces memory consumption by matching the usage to the inference phase. However, MeZO experiences slow convergence due to varying curvatures across model parameters. To overcome this limitation, we introduce HELENE, a novel scalable and memory-efficient optimizer that integrates annealed A-GNB gradients with a diagonal Hessian estimation and layer-wise clipping, serving as a second-order pre-conditioner. This combination allows for faster and more stable convergence. Our theoretical analysis demonstrates that HELENE improves convergence rates, particularly for models with heterogeneous layer dimensions, by reducing the dependency on the total parameter space dimension. Instead, the method scales with the largest layer dimension, making it highly suitable for modern LLM architectures. Experimental results on RoBERTa-large and OPT-1.3B across multiple tasks show that HELENE achieves up to a 20x speedup compared to MeZO, with average accuracy improvements of 1.5%. 
Furthermore, HELENE remains compatible with both full parameter tuning and parameter-efficient fine-tuning (PEFT), outperforming several state-of-the-art optimizers. The codes will be released after reviewing. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10696v1-abstract-full').style.display = 'none'; document.getElementById('2411.10696v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10261">arXiv:2411.10261</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10261">pdf</a>, <a href="https://arxiv.org/format/2411.10261">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Partial Scene Text Retrieval </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+H">Hao Wang</a>, <a href="/search/?searchtype=author&amp;query=Liao%2C+M">Minghui Liao</a>, <a href="/search/?searchtype=author&amp;query=Xie%2C+Z">Zhouyi Xie</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenyu Liu</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+X">Xiang Bai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10261v2-abstract-short" style="display: inline;"> The task of partial scene text retrieval involves localizing and searching for text instances that are the same or similar to a given 
query text from an image gallery. However, existing methods can only handle text-line instances, leaving the problem of searching for partial patches within these text-line instances unsolved due to a lack of patch annotations in the training data. To address this i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10261v2-abstract-full').style.display = 'inline'; document.getElementById('2411.10261v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10261v2-abstract-full" style="display: none;"> The task of partial scene text retrieval involves localizing and searching for text instances that are the same or similar to a given query text from an image gallery. However, existing methods can only handle text-line instances, leaving the problem of searching for partial patches within these text-line instances unsolved due to a lack of patch annotations in the training data. To address this issue, we propose a network that can simultaneously retrieve both text-line instances and their partial patches. Our method embeds the two types of data (query text and scene text instances) into a shared feature space and measures their cross-modal similarities. To handle partial patches, our proposed approach adopts a Multiple Instance Learning (MIL) approach to learn their similarities with query text, without requiring extra annotations. However, constructing bags, which is a standard step of conventional MIL approaches, can introduce numerous noisy samples for training, and lower inference speed. To address this issue, we propose a Ranking MIL (RankMIL) approach to adaptively filter those noisy samples. Additionally, we present a Dynamic Partial Match Algorithm (DPMA) that can directly search for the target partial patch from a text-line instance during the inference stage, without requiring bags. 
This greatly improves the search efficiency and the performance of retrieving partial patches. The source code and dataset are available at https://github.com/lanfeng4659/PSTR. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10261v2-abstract-full').style.display = 'none'; document.getElementById('2411.10261v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted on TPAMI</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10219">arXiv:2411.10219</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10219">pdf</a>, <a href="https://arxiv.org/format/2411.10219">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Constraints on the photon polarisation in $b \to s γ$ transitions using $B_s^0 \rightarrow φe^+e^-$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. 
Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. 
(1120 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10219v2-abstract-short" style="display: inline;"> An angular analysis of the $B_s^0 \rightarrow φe^+e^-$ decay is performed using the proton-proton collision dataset collected between 2011 and 2018 by the LHCb experiment, corresponding to an integrated luminosity of $9\,{\rm fb}^{-1}$ at centre-of-mass energies of 7, 8 and $13\,{\rm TeV}$. The analysis is performed in the very low dielectron invariant mass-squared region between $0.0009$ and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10219v2-abstract-full').style.display = 'inline'; document.getElementById('2411.10219v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10219v2-abstract-full" style="display: none;"> An angular analysis of the $B_s^0 \rightarrow φe^+e^-$ decay is performed using the proton-proton collision dataset collected between 2011 and 2018 by the LHCb experiment, corresponding to an integrated luminosity of $9\,{\rm fb}^{-1}$ at centre-of-mass energies of 7, 8 and $13\,{\rm TeV}$. The analysis is performed in the very low dielectron invariant mass-squared region between $0.0009$ and $0.2615\,{\rm GeV}^2\!/c^4$. The longitudinal polarisation fraction of the $φ$ meson is measured to be less than $11.5\%$ at $90\%$ confidence level. The $A_{\mathrm{T}}^{\mathcal{R}e C\!P}$ observable, which is related to the lepton forward-backward asymmetry, is measured to be $0.116 \pm 0.155 \pm 0.006$, where the first uncertainty is statistical and the second systematic. 
The transverse asymmetries, $A_{\mathrm{T}}^{(2)}$ and $A_{\mathrm{T}}^{\mathcal{I}m C\!P}$ , which are sensitive to the virtual photon polarisation, are found to be $-0.045 \pm 0.235 \pm 0.014$ and $0.002 \pm 0.247 \pm 0.016$, respectively. The results are consistent with Standard Model predictions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10219v2-abstract-full').style.display = 'none'; document.getElementById('2411.10219v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 4 figures. 
All figures and tables, along with any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3433/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-030, CERN-EP-2024-276 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10102">arXiv:2411.10102</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10102">pdf</a>, <a href="https://arxiv.org/format/2411.10102">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Theory">hep-th</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="General Relativity and Quantum Cosmology">gr-qc</span> </div> </div> <p class="title is-5 mathjax"> Novel Topological Classes in Black Hole Thermodynamics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wu%2C+D">Di Wu</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wentao Liu</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+S">Shuang-Qing Wu</a>, <a href="/search/?searchtype=author&amp;query=Mann%2C+R+B">Robert B. Mann</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10102v1-abstract-short" style="display: inline;"> By viewing black hole solutions as topological defects in thermodynamic parameter space, we unveil a novel topological class and two new topological subclasses, respectively denoted $W^{0-\leftrightarrow 1+}$, $\overline{W}^{1+}$, and $\hat{W}^{1+}$, that extend beyond the four established categories proposed by Wei et al. [Phys. Rev. D 110, L081501 (2024)]. 
Within the newly identified class and t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10102v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10102v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10102v1-abstract-full" style="display: none;"> By viewing black hole solutions as topological defects in thermodynamic parameter space, we unveil a novel topological class and two new topological subclasses, respectively denoted $W^{0-\leftrightarrow 1+}$, $\overline{W}^{1+}$, and $\hat{W}^{1+}$, that extend beyond the four established categories proposed by Wei et al. [Phys. Rev. D 110, L081501 (2024)]. Within the newly identified class and these two novel subclasses, the innermost small black hole states exhibit a distinct sequence of unstable, stable, and stable behaviours, while the outermost large black hole states display a uniform pattern of stable behaviours. These classifications indicate thermodynamic properties in both the low and high Hawking temperature regimes that are strikingly different from the previously known four topological classes. In particular, we demonstrate that static charged AdS black holes in gauged supergravity exhibit an intricate thermodynamic evolution that is notably distinct from that of the Reissner-Nordström anti-de Sitter (RN-AdS) black hole. From a topological perspective, we emphasize the advantages and potential of investigating thermodynamic phase transitions in these black holes, an area that has been rarely explored in previous research. Our findings not only enrich and sharpen the framework of topological classifications in black hole thermodynamics, but also represent a significant stride toward unraveling the fundamental nature of black holes and gravity. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10102v1-abstract-full').style.display = 'none'; document.getElementById('2411.10102v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5+2 pages, 5 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10003">arXiv:2411.10003</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10003">pdf</a>, <a href="https://arxiv.org/format/2411.10003">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Pro-Prophet: A Systematic Load Balancing Method for Efficient Parallel Training of Large-scale MoE Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+W">Wei Wang</a>, <a href="/search/?searchtype=author&amp;query=Lai%2C+Z">Zhiquan Lai</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+S">Shengwei Li</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Weijie Liu</a>, <a href="/search/?searchtype=author&amp;query=Ge%2C+K">Keshi Ge</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+A">Ao Shen</a>, <a href="/search/?searchtype=author&amp;query=Su%2C+H">Huayou Su</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+D">Dongsheng Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10003v2-abstract-short" style="display: inline;"> The size of deep learning models has been increasing to enhance model quality. The linear increase in training computation budget with model size means that training an extremely large-scale model is exceedingly time-consuming. Recently, the Mixture of Expert (MoE) has drawn significant attention as it can scale models to extra-large sizes with a stable computation budget. However, inefficient dis&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10003v2-abstract-full').style.display = 'inline'; document.getElementById('2411.10003v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10003v2-abstract-full" style="display: none;"> The size of deep learning models has been increasing to enhance model quality. The linear increase in training computation budget with model size means that training an extremely large-scale model is exceedingly time-consuming. Recently, the Mixture of Expert (MoE) has drawn significant attention as it can scale models to extra-large sizes with a stable computation budget. However, inefficient distributed training of large-scale MoE models hinders their broader application. Specifically, a considerable dynamic load imbalance occurs among devices during training, significantly reducing throughput. Several load-balancing works have been proposed to address the challenge. System-level solutions draw more attention for their hardware affinity and non-disruption of model convergence compared to algorithm-level ones. However, they are troubled by high communication costs and poor communication-computation overlapping. 
To address these challenges, we propose a systematic load-balancing method, Pro-Prophet, which consists of a planner and a scheduler for efficient parallel training of large-scale MoE models. To adapt to the dynamic load imbalance, we profile training statistics and use them to design Pro-Prophet. For lower communication volume, Pro-Prophet planner determines a series of lightweight load-balancing strategies and efficiently searches for a communication-efficient one for training based on the statistics. For sufficient overlapping of communication and computation, Pro-Prophet scheduler schedules the data-dependent operations based on the statistics and operation features, further improving the training throughput. Experimental results indicate that Pro-Prophet achieves up to 2.66x speedup compared to Deepspeed-MoE and FasterMoE. Additionally, Pro-Prophet achieves a load-balancing enhancement of up to 11.01x when compared to FasterMoE. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10003v2-abstract-full').style.display = 'none'; document.getElementById('2411.10003v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09413">arXiv:2411.09413</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09413">pdf</a>, <a href="https://arxiv.org/ps/2411.09413">ps</a>, <a href="https://arxiv.org/format/2411.09413">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Script-centric behavior understanding for assisted autism spectrum disorder diagnosis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenxing Liu</a>, <a href="/search/?searchtype=author&amp;query=Pan%2C+Y">Yueran Pan</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+M">Ming Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09413v1-abstract-short" style="display: inline;"> Observing and analyzing children&#39;s social behaviors is crucial for the early diagnosis of Autism Spectrum Disorders (ASD). This work focuses on automatically detecting ASD using computer vision techniques and large language models (LLMs). Existing methods typically rely on supervised learning. 
However, the scarcity of ASD diagnostic datasets and the lack of interpretability in diagnostic results s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09413v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09413v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09413v1-abstract-full" style="display: none;"> Observing and analyzing children&#39;s social behaviors is crucial for the early diagnosis of Autism Spectrum Disorders (ASD). This work focuses on automatically detecting ASD using computer vision techniques and large language models (LLMs). Existing methods typically rely on supervised learning. However, the scarcity of ASD diagnostic datasets and the lack of interpretability in diagnostic results significantly limits its clinical application. To address these challenges, we introduce a novel unsupervised approach based on script-centric behavior understanding. Our pipeline converts video content into scripts that describe the behavior of characters, leveraging the generalizability of large language models to detect ASD in a zero-shot or few-shot manner. Specifically, we propose a scripts transcription module for multimodal behavior data textualization and a domain prompts module to bridge LLMs. Our method achieves an accuracy of 92.00\% in diagnosing ASD in children with an average age of 24 months, surpassing the performance of supervised learning methods by 3.58\% absolutely. Extensive experiments confirm the effectiveness of our approach and suggest its potential for advancing ASD research through LLMs. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09413v1-abstract-full').style.display = 'none'; document.getElementById('2411.09413v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 4 figures, submitted to ICASSP 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09343">arXiv:2411.09343</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09343">pdf</a>, <a href="https://arxiv.org/format/2411.09343">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Nuclear Experiment">nucl-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of $φ(1020)$ meson production in fixed-target $\textit{p}$Ne collisions at $\sqrt{s_{NN}}$ = 68.5 GeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. 
Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1127 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09343v1-abstract-short" style="display: inline;"> The first measurement of $φ(1020)$ meson production in fixed-target $p$Ne collisions at $\sqrt{s_{NN}}=68.5$ GeV is presented. 
The $φ(1020)$ mesons are reconstructed in their $K^{+}K^{-}$ decay in a data sample consisting of proton collisions on neon nuclei at rest, corresponding to an integrated luminosity of $21.7 \pm 1.4$ nb$^{-1}$, collected by the LHCb detector at CERN. The $φ(1020)$ producti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09343v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09343v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09343v1-abstract-full" style="display: none;"> The first measurement of $φ(1020)$ meson production in fixed-target $p$Ne collisions at $\sqrt{s_{NN}}=68.5$ GeV is presented. The $φ(1020)$ mesons are reconstructed in their $K^{+}K^{-}$ decay in a data sample consisting of proton collisions on neon nuclei at rest, corresponding to an integrated luminosity of $21.7 \pm 1.4$ nb$^{-1}$, collected by the LHCb detector at CERN. The $φ(1020)$ production cross-section in the centre-of-mass rapidity range of $-1.8&lt;y^*&lt;0$ and transverse momentum range of $800&lt;p_{T}&lt;6500$ MeV/c is found to be $σ=182.7\pm2.7~\text{(stat.)}\pm14.1~\text{(syst)}~μ$b/nucleon. A double-differential measurement of the cross-section is also provided in four regions of rapidity and six regions of transverse momentum of the $φ(1020)$ meson and compared with the predictions from Pythia and EPOS4, which are found to underestimate the experimental values. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09343v1-abstract-full').style.display = 'none'; document.getElementById('2411.09343v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3673/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-036, CERN-EP-2024-274 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08475">arXiv:2411.08475</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08475">pdf</a>, <a href="https://arxiv.org/ps/2411.08475">ps</a>, <a href="https://arxiv.org/format/2411.08475">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> </div> </div> <p class="title is-5 mathjax"> Anti-Ramsey Number of Friendship Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenke Liu</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+H">Hongliang Lu</a>, <a href="/search/?searchtype=author&amp;query=Luo%2C+X">Xinyue Luo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08475v2-abstract-short" style="display: inline;"> An edge-colored graph is called \textit{rainbow graph} if all the colors on its edges are distinct. 
For a given positive integer $n$ and a family of graphs $\mathcal{G}$, the anti-Ramsey number $ar(n, \mathcal{G})$ is the smallest number of colors $r$ required to ensure that, no matter how the edges of the complete graph $K_n$ are colored using exactly $r$ colors, there will always be a rainbow co&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08475v2-abstract-full').style.display = 'inline'; document.getElementById('2411.08475v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08475v2-abstract-full" style="display: none;"> An edge-colored graph is called \textit{rainbow graph} if all the colors on its edges are distinct. For a given positive integer $n$ and a family of graphs $\mathcal{G}$, the anti-Ramsey number $ar(n, \mathcal{G})$ is the smallest number of colors $r$ required to ensure that, no matter how the edges of the complete graph $K_n$ are colored using exactly $r$ colors, there will always be a rainbow copy of some graph $G$ from the family $\mathcal{G}$. A friendship graph $F_k$ is the graph obtained by combining $k$ triangles that share a common vertex. In this paper, we determine the anti-Ramsey number $ar(n, \{F_k\})$ for large values of $n$. Additionally, we also determine the $ar(n, \{K_{1,k}, kK_2\})$, where $K_{1,k}$ is a star graph with $k+1$ vertices and $kK_2$ is a matching of size $k$. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08475v2-abstract-full').style.display = 'none'; document.getElementById('2411.08475v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08468">arXiv:2411.08468</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08468">pdf</a>, <a href="https://arxiv.org/ps/2411.08468">ps</a>, <a href="https://arxiv.org/format/2411.08468">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> $\ell_0$ factor analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+L">Linyang Wang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wanquan Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+B">Bin Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08468v1-abstract-short" style="display: inline;"> Factor Analysis is about finding a low-rank plus sparse additive decomposition from a noisy estimate of the signal covariance matrix. 
In order to get such a decomposition, we formulate an optimization problem using the nuclear norm for the low-rank component, the $\ell_0$ norm for the sparse component, and the Kullback-Leibler divergence to control the residual in the sample covariance matrix. An&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08468v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08468v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08468v1-abstract-full" style="display: none;"> Factor Analysis is about finding a low-rank plus sparse additive decomposition from a noisy estimate of the signal covariance matrix. In order to get such a decomposition, we formulate an optimization problem using the nuclear norm for the low-rank component, the $\ell_0$ norm for the sparse component, and the Kullback-Leibler divergence to control the residual in the sample covariance matrix. An alternating minimization algorithm is designed for the solution of the optimization problem. The effectiveness of the algorithm is verified via simulations on synthetic and real datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08468v1-abstract-full').style.display = 'none'; document.getElementById('2411.08468v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08248">arXiv:2411.08248</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08248">pdf</a>, <a href="https://arxiv.org/format/2411.08248">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Deceiving Question-Answering Models: A Hybrid Word-Level Adversarial Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jiyao Li</a>, <a href="/search/?searchtype=author&amp;query=Ni%2C+M">Mingze Ni</a>, <a href="/search/?searchtype=author&amp;query=Gong%2C+Y">Yongshun Gong</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wei Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08248v1-abstract-short" style="display: inline;"> Deep learning underpins most of the currently advanced natural language processing (NLP) tasks such as textual classification, neural machine translation (NMT), abstractive summarization and question-answering (QA). However, the robustness of the models, particularly QA models, against adversarial attacks is a critical concern that remains insufficiently explored. 
This paper introduces QA-Attack (&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08248v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08248v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08248v1-abstract-full" style="display: none;"> Deep learning underpins most of the currently advanced natural language processing (NLP) tasks such as textual classification, neural machine translation (NMT), abstractive summarization and question-answering (QA). However, the robustness of the models, particularly QA models, against adversarial attacks is a critical concern that remains insufficiently explored. This paper introduces QA-Attack (Question Answering Attack), a novel word-level adversarial strategy that fools QA models. Our attention-based attack exploits the customized attention mechanism and deletion ranking strategy to identify and target specific words within contextual passages. It creates deceptive inputs by carefully choosing and substituting synonyms, preserving grammatical integrity while misleading the model to produce incorrect responses. Our approach demonstrates versatility across various question types, particularly when dealing with extensive long textual inputs. Extensive experiments on multiple benchmark datasets demonstrate that QA-Attack successfully deceives baseline QA models and surpasses existing adversarial techniques regarding success rate, semantics changes, BLEU score, fluency and grammar error rate. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08248v1-abstract-full').style.display = 'none'; document.getElementById('2411.08248v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07975">arXiv:2411.07975</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07975">pdf</a>, <a href="https://arxiv.org/format/2411.07975">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> JanusFlow: Harmonizing Autoregression and Rectified Flow for Unified Multimodal Understanding and Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ma%2C+Y">Yiyang Ma</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+X">Xingchao Liu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiaokang Chen</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wen Liu</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+C">Chengyue Wu</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Z">Zhiyu Wu</a>, <a href="/search/?searchtype=author&amp;query=Pan%2C+Z">Zizheng Pan</a>, <a href="/search/?searchtype=author&amp;query=Xie%2C+Z">Zhenda Xie</a>, <a 
href="/search/?searchtype=author&amp;query=Zhang%2C+H">Haowei Zhang</a>, <a href="/search/?searchtype=author&amp;query=yu%2C+X">Xingkai yu</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+L">Liang Zhao</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yisong Wang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+J">Jiaying Liu</a>, <a href="/search/?searchtype=author&amp;query=Ruan%2C+C">Chong Ruan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07975v1-abstract-short" style="display: inline;"> We present JanusFlow, a powerful framework that unifies image understanding and generation in a single model. JanusFlow introduces a minimalist architecture that integrates autoregressive language models with rectified flow, a state-of-the-art method in generative modeling. Our key finding demonstrates that rectified flow can be straightforwardly trained within the large language model framework,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07975v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07975v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07975v1-abstract-full" style="display: none;"> We present JanusFlow, a powerful framework that unifies image understanding and generation in a single model. JanusFlow introduces a minimalist architecture that integrates autoregressive language models with rectified flow, a state-of-the-art method in generative modeling. Our key finding demonstrates that rectified flow can be straightforwardly trained within the large language model framework, eliminating the need for complex architectural modifications. 
To further improve the performance of our unified model, we adopt two key strategies: (i) decoupling the understanding and generation encoders, and (ii) aligning their representations during unified training. Extensive experiments show that JanusFlow achieves comparable or superior performance to specialized models in their respective domains, while significantly outperforming existing unified approaches across standard benchmarks. This work represents a step toward more efficient and versatile vision-language models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07975v1-abstract-full').style.display = 'none'; document.getElementById('2411.07975v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07730">arXiv:2411.07730</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07730">pdf</a>, <a href="https://arxiv.org/ps/2411.07730">ps</a>, <a href="https://arxiv.org/format/2411.07730">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Study of the light scalar $a_{0}(980)$ through the decay $D^{0} \to a_{0}(980)^-e^{+} ν_{e}$ with $a_{0}(980)^- \to ηπ^-$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. 
Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. 
(649 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07730v1-abstract-short" style="display: inline;"> Using 7.93 ${\rm fb^{-1}}$ of $e^+e^-$ collision data collected at a center-of-mass energy of 3.773 ${\rm GeV}$ with the BESIII detector, we present an analysis of the decay $D^{0} \to ηπ^- e^+ ν_{e}$. The branching fraction of the decay $D^{0} \to a_{0}(980)^{-} e^+ ν_{e}$ with $a_{0}(980)^{-} \to ηπ^{-}$ is measured to be $(0.86\pm0.17_{\text{stat}}\pm0.05_{\text{syst}})\times 10^{-4}$. The deca&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07730v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07730v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07730v1-abstract-full" style="display: none;"> Using 7.93 ${\rm fb^{-1}}$ of $e^+e^-$ collision data collected at a center-of-mass energy of 3.773 ${\rm GeV}$ with the BESIII detector, we present an analysis of the decay $D^{0} \to ηπ^- e^+ ν_{e}$. The branching fraction of the decay $D^{0} \to a_{0}(980)^{-} e^+ ν_{e}$ with $a_{0}(980)^{-} \to ηπ^{-}$ is measured to be $(0.86\pm0.17_{\text{stat}}\pm0.05_{\text{syst}})\times 10^{-4}$. The decay dynamics of this process is studied with a single-pole parameterization of the hadronic form factor and the Flatté formula describing the $a_0(980)$ line shape in the differential decay rate. The product of the form factor $f^{ a_0}_{+}(0)$ and the Cabibbo-Kobayashi-Maskawa matrix element $|V_{cd}|$ is determined for the first time with the result $f^{ a_0}_+(0)|V_{cd}|=0.126\pm0.013_{\rm stat}\pm0.003_{\rm syst}$. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07730v1-abstract-full').style.display = 'none'; document.getElementById('2411.07730v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06767">arXiv:2411.06767</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06767">pdf</a>, <a href="https://arxiv.org/format/2411.06767">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> PDC &amp; DM-SFT: A Road for LLM SQL Bug-Fix Enhancing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Duan%2C+Y">Yiwen Duan</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+Y">Yonghong Yu</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+X">Xiaoming Zhao</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Y">Yichang Wu</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenbo Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06767v1-abstract-short" style="display: inline;"> Code Large Language Models (Code LLMs), such as Code llama and DeepSeek-Coder, have demonstrated exceptional 
performance in the code generation tasks. However, most existing models focus on the abilities of generating correct code, but often struggle with bug repair. We introduce a suit of methods to enhance LLM&#39;s SQL bug-fixing abilities. The methods are mainly consisted of two parts: A Progressi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06767v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06767v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06767v1-abstract-full" style="display: none;"> Code Large Language Models (Code LLMs), such as Code llama and DeepSeek-Coder, have demonstrated exceptional performance in the code generation tasks. However, most existing models focus on the abilities of generating correct code, but often struggle with bug repair. We introduce a suit of methods to enhance LLM&#39;s SQL bug-fixing abilities. The methods are mainly consisted of two parts: A Progressive Dataset Construction (PDC) from scratch and Dynamic Mask Supervised Fine-tuning (DM-SFT). PDC proposes two data expansion methods from the perspectives of breadth first and depth first respectively. DM-SFT introduces an efficient bug-fixing supervised learning approach, which effectively reduce the total training steps and mitigate the &#34;disorientation&#34; in SQL code bug-fixing training. In our evaluation, the code LLM models trained with two methods have exceeds all current best performing model which size is much larger. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06767v1-abstract-full').style.display = 'none'; document.getElementById('2411.06767v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">COLING-Industry 2025 accepted</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06384">arXiv:2411.06384</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06384">pdf</a>, <a href="https://arxiv.org/ps/2411.06384">ps</a>, <a href="https://arxiv.org/format/2411.06384">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Strongly Correlated Electrons">cond-mat.str-el</span> </div> </div> <p class="title is-5 mathjax"> Thermal Broadening of Phonon Spectral Function in Classical Lattice Models: Projective Truncation Approximation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Jia%2C+H">Hu-Wei Jia</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wen-Jun Liu</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Y">Yue-Hong Wu</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+K">Kou-Han Ma</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+L">Lei Wang</a>, <a href="/search/?searchtype=author&amp;query=Tong%2C+N">Ning-Hua Tong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2411.06384v1-abstract-short" style="display: inline;"> Thermal broadening of the quasi-particle peak in the spectral function is an important physical feature in many statistical systems, but difficult to calculate. Within the projective truncation approximation (PTA) of Green&#39;s function equation of motion for classical systems, we produce the spectral function with thermal broadened quasi-particles peak using an $H$-expanded basis. We demonstrate thi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06384v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06384v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06384v1-abstract-full" style="display: none;"> Thermal broadening of the quasi-particle peak in the spectral function is an important physical feature in many statistical systems, but difficult to calculate. Within the projective truncation approximation (PTA) of Green&#39;s function equation of motion for classical systems, we produce the spectral function with thermal broadened quasi-particles peak using an $H$-expanded basis. We demonstrate this method on two model systems, the one-variable anharmonic oscillator model and the one-dimensional classical $φ^4$ lattice model. Comparison with exact spectral function and the molecular dynamics simulation results shows that the method is semi-quantitatively accurate. Extension of the $H$-expanded basis to PTA for quantum system is possible. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06384v1-abstract-full').style.display = 'none'; document.getElementById('2411.06384v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 14 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06374">arXiv:2411.06374</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06374">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Metric Learning for Tag Recommendation: Tackling Data Sparsity and Cold Start Issues </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Luo%2C+Y">Yuanshuai Luo</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+R">Rui Wang</a>, <a href="/search/?searchtype=author&amp;query=Liang%2C+Y">Yaxin Liang</a>, <a href="/search/?searchtype=author&amp;query=Liang%2C+A">Ankai Liang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenyi Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06374v1-abstract-short" style="display: inline;"> With the rapid growth of digital information, personalized 
recommendation systems have become an indispensable part of Internet services, especially in the fields of e-commerce, social media, and online entertainment. However, traditional collaborative filtering and content-based recommendation methods have limitations in dealing with data sparsity and cold start problems, especially in the face o&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06374v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06374v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06374v1-abstract-full" style="display: none;"> With the rapid growth of digital information, personalized recommendation systems have become an indispensable part of Internet services, especially in the fields of e-commerce, social media, and online entertainment. However, traditional collaborative filtering and content-based recommendation methods have limitations in dealing with data sparsity and cold start problems, especially in the face of largescale heterogeneous data, which makes it difficult to meet user expectations. This paper proposes a new label recommendation algorithm based on metric learning, which aims to overcome the challenges of traditional recommendation systems by learning effective distance or similarity metrics to capture the subtle differences between user preferences and item features. Experimental results show that the algorithm outperforms baseline methods including local response metric learning (LRML), collaborative metric learning (CML), and adaptive tensor factorization (ATF) based on adversarial learning on multiple evaluation metrics. In particular, it performs particularly well in the accuracy of the first few recommended items, while maintaining high robustness and maintaining high recommendation accuracy. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06374v1-abstract-full').style.display = 'none'; document.getElementById('2411.06374v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05881">arXiv:2411.05881</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05881">pdf</a>, <a href="https://arxiv.org/format/2411.05881">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> MIPD: A Multi-sensory Interactive Perception Dataset for Embodied Intelligent Driving </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+Z">Zhiwei Li</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+T">Tingzhen Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+M">Meihua Zhou</a>, <a href="/search/?searchtype=author&amp;query=Tang%2C+D">Dandan Tang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+P">Pengwei Zhang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenzhuo Liu</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Q">Qiaoning Yang</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+T">Tianyu Shen</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+K">Kunfeng Wang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+H">Huaping Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark 
mathjax" id="2411.05881v1-abstract-short" style="display: inline;"> During the process of driving, humans usually rely on multiple senses to gather information and make decisions. Analogously, in order to achieve embodied intelligence in autonomous driving, it is essential to integrate multidimensional sensory information in order to facilitate interaction with the environment. However, the current multi-modal fusion sensing schemes often neglect these additional&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05881v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05881v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05881v1-abstract-full" style="display: none;"> During the process of driving, humans usually rely on multiple senses to gather information and make decisions. Analogously, in order to achieve embodied intelligence in autonomous driving, it is essential to integrate multidimensional sensory information in order to facilitate interaction with the environment. However, the current multi-modal fusion sensing schemes often neglect these additional sensory inputs, hindering the realization of fully autonomous driving. This paper considers multi-sensory information and proposes a multi-modal interactive perception dataset named MIPD, enabling expanding the current autonomous driving algorithm framework, for supporting the research on embodied intelligent driving. In addition to the conventional camera, lidar, and 4D radar data, our dataset incorporates multiple sensor inputs including sound, light intensity, vibration intensity and vehicle speed to enrich the dataset comprehensiveness. Comprising 126 consecutive sequences, many exceeding twenty seconds, MIPD features over 8,500 meticulously synchronized and annotated frames. 
Moreover, it encompasses many challenging scenarios, covering various road and lighting conditions. The dataset has undergone thorough experimental validation, producing valuable insights for the exploration of next-generation autonomous driving frameworks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05881v1-abstract-full').style.display = 'none'; document.getElementById('2411.05881v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Data, development kit and more details will be available at https://github.com/BUCT-IUSRC/Dataset MIPD</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05669">arXiv:2411.05669</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05669">pdf</a>, <a href="https://arxiv.org/format/2411.05669">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Nuclear Experiment">nucl-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of the $ψ(2S)$ to $J/ψ$ cross-section ratio as a function of centrality in PbPb collisions at $\sqrt{s_{\text{NN}}}$ = 5.02 TeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. 
Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. 
(1128 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05669v1-abstract-short" style="display: inline;"> The dissociation of quarkonium states with different binding energies produced in heavy-ion collisions is a powerful probe for investigating the formation and properties of the quark-gluon plasma. The ratio of production cross-sections of $ψ(2S)$ and $J/ψ$ mesons times the ratio of their branching fractions into the dimuon final state is measured as a function of centrality using data collected by&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05669v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05669v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05669v1-abstract-full" style="display: none;"> The dissociation of quarkonium states with different binding energies produced in heavy-ion collisions is a powerful probe for investigating the formation and properties of the quark-gluon plasma. The ratio of production cross-sections of $ψ(2S)$ and $J/ψ$ mesons times the ratio of their branching fractions into the dimuon final state is measured as a function of centrality using data collected by the LHCb detector in PbPb collisions at $\sqrt{s_{\text{NN}}}$ = 5.02 TeV. The measured ratio shows no dependence on the collision centrality, and is compared to the latest theory predictions and to the recent measurements in literature. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05669v1-abstract-full').style.display = 'none'; document.getElementById('2411.05669v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with any supplementary material and additional information, are available at https://cern.ch/lhcbproject/Publications/p/LHCb-PAPER-2024-041.html (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> CERN-EP-2024-272, LHCb-PAPER-2024-041 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05396">arXiv:2411.05396</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05396">pdf</a>, <a href="https://arxiv.org/format/2411.05396">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cosmology and Nongalactic Astrophysics">astro-ph.CO</span> </div> </div> <p class="title is-5 mathjax"> Probing the He II re-Ionization ERa via Absorbing C IV Historical Yield (HIERACHY) II: Project Design, Current Status, and Examples of Initial Data Products </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jiang-Tao Li</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+X">Xiaodi Yu</a>, <a 
href="/search/?searchtype=author&amp;query=Mao%2C+H">Huiyang Mao</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+H">Hanxiao Chen</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+T">Tiancheng Yang</a>, <a href="/search/?searchtype=author&amp;query=Qu%2C+Z">Zhijie Qu</a>, <a href="/search/?searchtype=author&amp;query=Bian%2C+F">Fuyan Bian</a>, <a href="/search/?searchtype=author&amp;query=Bregman%2C+J+N">Joel N. Bregman</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+Z">Zheng Cai</a>, <a href="/search/?searchtype=author&amp;query=Fan%2C+X">Xiaohui Fan</a>, <a href="/search/?searchtype=author&amp;query=Fang%2C+T">Taotao Fang</a>, <a href="/search/?searchtype=author&amp;query=Ji%2C+L">Li Ji</a>, <a href="/search/?searchtype=author&amp;query=Ji%2C+Z">Zhiyuan Ji</a>, <a href="/search/?searchtype=author&amp;query=Johnson%2C+S+D">Sean D. Johnson</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+G">Guoliang Li</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Weizhe Liu</a>, <a href="/search/?searchtype=author&amp;query=Song%2C+Y">Ying-Yi Song</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+F">Feige Wang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+T">Tao Wang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+X">Xin Wang</a>, <a href="/search/?searchtype=author&amp;query=Williams%2C+C">Christina Williams</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+M">Mingxuan Xu</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+J">Jinyi Yang</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Y">Yang Yang</a>, <a href="/search/?searchtype=author&amp;query=Zheng%2C+X">Xianzhong Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05396v1-abstract-short" style="display: inline;"> The He II reionization epoch is expected to take place at $z\sim3-5$. 
In this stage, the helium and metals in the inter-galactic medium (IGM) are further ionized with additional contributions from harder non-stellar sources, and some large-scale gravitationally bound systems approach virialization. The &#34;Probing the He II re-Ionization ERa via Absorbing C IV Historical Yield (HIERACHY)&#34; program uti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05396v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05396v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05396v1-abstract-full" style="display: none;"> The He II reionization epoch is expected to take place at $z\sim3-5$. In this stage, the helium and metals in the inter-galactic medium (IGM) are further ionized with additional contributions from harder non-stellar sources, and some large-scale gravitationally bound systems approach virialization. The &#34;Probing the He II re-Ionization ERa via Absorbing C IV Historical Yield (HIERACHY)&#34; program utilizes high- and medium-resolution spectra of bright background quasars at $z\approx3.9-5.2$ to investigate Ly$α$, C IV, and other metal absorption lines during this epoch. Additionally, we employ narrow-band imaging to search for Ly$α$ emitters associated with C IV absorbers, alongside multi-wavelength observations to identify and study particularly intriguing cases. In this paper, we present the design of the HIERACHY program, its current status, major scientific goals, and examples of initial data products from completed Magellan/MIKE, MagE spectroscopy, and MDM imaging observations. We also provide a brief outlook on future multi-wavelength observations that may significantly impact the related science. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05396v1-abstract-full').style.display = 'none'; document.getElementById('2411.05396v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages, 10 figures, 2 tables, accepted for publication by ApJ</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05307">arXiv:2411.05307</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05307">pdf</a>, <a href="https://arxiv.org/format/2411.05307">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Revisiting Network Perturbation for Semi-Supervised Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+S">Sien Li</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+T">Tao Wang</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+R">Ruizhe Hu</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenxi Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05307v1-abstract-short" style="display: inline;"> In semi-supervised semantic segmentation 
(SSS), weak-to-strong consistency regularization techniques are widely utilized in recent works, typically combined with input-level and feature-level perturbations. However, the integration between weak-to-strong consistency regularization and network perturbation has been relatively rare. We note several problems with existing network perturbations in SSS&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05307v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05307v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05307v1-abstract-full" style="display: none;"> In semi-supervised semantic segmentation (SSS), weak-to-strong consistency regularization techniques are widely utilized in recent works, typically combined with input-level and feature-level perturbations. However, the integration between weak-to-strong consistency regularization and network perturbation has been relatively rare. We note several problems with existing network perturbations in SSS that may contribute to this phenomenon. By revisiting network perturbations, we introduce a new approach for network perturbation to expand the existing weak-to-strong consistency regularization for unlabeled data. Additionally, we present a volatile learning process for labeled data, which is uncommon in existing research. Building upon previous work that includes input-level and feature-level perturbations, we present MLPMatch (Multi-Level-Perturbation Match), an easy-to-implement and efficient framework for semi-supervised semantic segmentation. MLPMatch has been validated on the Pascal VOC and Cityscapes datasets, achieving state-of-the-art performance. Code is available from https://github.com/LlistenL/MLPMatch. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05307v1-abstract-full').style.display = 'none'; document.getElementById('2411.05307v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by PRCV2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04954">arXiv:2411.04954</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04954">pdf</a>, <a href="https://arxiv.org/format/2411.04954">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> CAD-MLLM: Unifying Multimodality-Conditioned CAD Generation With MLLM </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xu%2C+J">Jingwei Xu</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+C">Chenyu Wang</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+Z">Zibo Zhao</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wen Liu</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+Y">Yi Ma</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+S">Shenghua Gao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04954v1-abstract-short" style="display: inline;"> This paper aims to design a unified Computer-Aided Design 
(CAD) generation system that can easily generate CAD models based on the user&#39;s inputs in the form of textual description, images, point clouds, or even a combination of them. Towards this goal, we introduce the CAD-MLLM, the first system capable of generating parametric CAD models conditioned on the multimodal input. Specifically, within t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04954v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04954v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04954v1-abstract-full" style="display: none;"> This paper aims to design a unified Computer-Aided Design (CAD) generation system that can easily generate CAD models based on the user&#39;s inputs in the form of textual description, images, point clouds, or even a combination of them. Towards this goal, we introduce the CAD-MLLM, the first system capable of generating parametric CAD models conditioned on the multimodal input. Specifically, within the CAD-MLLM framework, we leverage the command sequences of CAD models and then employ advanced large language models (LLMs) to align the feature space across these diverse multi-modalities data and CAD models&#39; vectorized representations. To facilitate the model training, we design a comprehensive data construction and annotation pipeline that equips each CAD model with corresponding multimodal data. Our resulting dataset, named Omni-CAD, is the first multimodal CAD dataset that contains textual description, multi-view images, points, and command sequence for each CAD model. It contains approximately 450K instances and their CAD construction sequences. 
To thoroughly evaluate the quality of our generated CAD models, we go beyond current evaluation metrics that focus on reconstruction quality by introducing additional metrics that assess topology quality and surface enclosure extent. Extensive experimental results demonstrate that CAD-MLLM significantly outperforms existing conditional generative methods and remains highly robust to noises and missing points. The project page and more visualizations can be found at: https://cad-mllm.github.io/ <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04954v1-abstract-full').style.display = 'none'; document.getElementById('2411.04954v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project page: https://cad-mllm.github.io/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04890">arXiv:2411.04890</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04890">pdf</a>, <a href="https://arxiv.org/format/2411.04890">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> GUI Agents with Foundation Models: A Comprehensive Survey </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+S">Shuai Wang</a>, <a 
href="/search/?searchtype=author&amp;query=Liu%2C+W">Weiwen Liu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+J">Jingxuan Chen</a>, <a href="/search/?searchtype=author&amp;query=Gan%2C+W">Weinan Gan</a>, <a href="/search/?searchtype=author&amp;query=Zeng%2C+X">Xingshan Zeng</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+S">Shuai Yu</a>, <a href="/search/?searchtype=author&amp;query=Hao%2C+X">Xinlong Hao</a>, <a href="/search/?searchtype=author&amp;query=Shao%2C+K">Kun Shao</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yasheng Wang</a>, <a href="/search/?searchtype=author&amp;query=Tang%2C+R">Ruiming Tang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04890v1-abstract-short" style="display: inline;"> Recent advances in foundation models, particularly Large Language Models (LLMs) and Multimodal Large Language Models (MLLMs), facilitate intelligent agents being capable of performing complex tasks. By leveraging the ability of (M)LLMs to process and interpret Graphical User Interfaces (GUIs), these agents can autonomously execute user instructions by simulating human-like interactions such as cli&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04890v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04890v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04890v1-abstract-full" style="display: none;"> Recent advances in foundation models, particularly Large Language Models (LLMs) and Multimodal Large Language Models (MLLMs), facilitate intelligent agents being capable of performing complex tasks. 
By leveraging the ability of (M)LLMs to process and interpret Graphical User Interfaces (GUIs), these agents can autonomously execute user instructions by simulating human-like interactions such as clicking and typing. This survey consolidates recent research on (M)LLM-based GUI agents, highlighting key innovations in data, frameworks, and applications. We begin by discussing representative datasets and benchmarks. Next, we summarize a unified framework that captures the essential components used in prior research, accompanied by a taxonomy. Additionally, we explore commercial applications of (M)LLM-based GUI agents. Drawing from existing work, we identify several key challenges and propose future research directions. We hope this paper will inspire further developments in the field of (M)LLM-based GUI agents. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04890v1-abstract-full').style.display = 'none'; document.getElementById('2411.04890v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04794">arXiv:2411.04794</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04794">pdf</a>, <a href="https://arxiv.org/format/2411.04794">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> AlignXIE: Improving Multilingual Information Extraction by Cross-Lingual Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zuo%2C+Y">Yuxin Zuo</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+W">Wenxuan Jiang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenxuan Liu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Z">Zixuan Li</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+L">Long Bai</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+H">Hanbin Wang</a>, <a href="/search/?searchtype=author&amp;query=Zeng%2C+Y">Yutao Zeng</a>, <a href="/search/?searchtype=author&amp;query=Jin%2C+X">Xiaolong Jin</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+J">Jiafeng Guo</a>, <a href="/search/?searchtype=author&amp;query=Cheng%2C+X">Xueqi Cheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04794v1-abstract-short" style="display: inline;"> Empirical evidence suggests that LLMs exhibit spontaneous cross-lingual alignment. 
Our findings suggest that although LLMs also demonstrate promising cross-lingual alignment in Information Extraction, there remains significant imbalance across languages, revealing an underlying deficiency in the IE alignment. To address this issue, we propose AlignXIE, a powerful code-based LLM that significantly&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04794v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04794v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04794v1-abstract-full" style="display: none;"> Empirical evidence suggests that LLMs exhibit spontaneous cross-lingual alignment. Our findings suggest that although LLMs also demonstrate promising cross-lingual alignment in Information Extraction, there remains significant imbalance across languages, revealing an underlying deficiency in the IE alignment. To address this issue, we propose AlignXIE, a powerful code-based LLM that significantly enhances cross-lingual IE alignment through two strategies. Firstly, AlignXIE formulates IE across different languages, especially non-English ones, as code generation tasks, standardizing the representation of various schemas using Python classes to ensure consistency of the same ontology in different languages and align the schema. Secondly, it incorporates an IE cross-lingual alignment phase through a translated instance prediction task proposed in this paper to align the extraction process, utilizing ParallelNER, an IE bilingual parallel dataset with 257,190 samples, generated by our proposed LLM-based automatic pipeline for IE parallel data construction, with manual annotation to ensure quality. Ultimately, we obtain AlignXIE through multilingual IE instruction tuning. 
Although without training in 9 unseen languages, AlignXIE surpasses ChatGPT by $30.17\%$ and SoTA by $20.03\%$, thereby demonstrating superior cross-lingual IE capabilities. Comprehensive evaluations on 63 IE benchmarks in Chinese and English under various settings, demonstrate that AlignXIE significantly enhances cross-lingual and multilingual IE through boosting the IE alignment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04794v1-abstract-full').style.display = 'none'; document.getElementById('2411.04794v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Work in progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04666">arXiv:2411.04666</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04666">pdf</a>, <a href="https://arxiv.org/format/2411.04666">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> Topological Singularities in Metasurface Scattering Matrices: From Nodal Lines to Exceptional Lines </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+J">Jingguang Chen</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenzhe Liu</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+J">Jiajun Wang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+R">Ruo-Yang Zhang</a>, <a 
href="/search/?searchtype=author&amp;query=Cui%2C+X">Xiaohan Cui</a>, <a href="/search/?searchtype=author&amp;query=Guan%2C+F">Fang Guan</a>, <a href="/search/?searchtype=author&amp;query=Shi%2C+L">Lei Shi</a>, <a href="/search/?searchtype=author&amp;query=Zi%2C+J">Jian Zi</a>, <a href="/search/?searchtype=author&amp;query=Chan%2C+C+T">C. T. Chan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04666v1-abstract-short" style="display: inline;"> Topological properties of photonic structures described by Hamiltonian matrices have been extensively studied in recent years. Photonic systems are often open systems, and their coupling with the environment is characterized by scattering matrices, which can exhibit topological features as well. In this work, we uncover that topological singularities can be manifested in the scattering matrices of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04666v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04666v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04666v1-abstract-full" style="display: none;"> Topological properties of photonic structures described by Hamiltonian matrices have been extensively studied in recent years. Photonic systems are often open systems, and their coupling with the environment is characterized by scattering matrices, which can exhibit topological features as well. In this work, we uncover that topological singularities can be manifested in the scattering matrices of two-dimensional periodic photonic systems with open boundaries in the third dimension, introducing a new topological approach to describe scattering. 
We elaborate the importance of symmetry and demonstrate that mirror symmetry gives rise to the formation of diabolic points and nodal lines in the three-dimensional frequency-momentum space, which transform into exceptional points and lines in the presence of material loss. These topological features in the eigenvalue structure of the scattering matrix manifest as vortex lines in the cross-polarization scattering phase, providing a direct link between the eigen-problem and observable scattering phenomena in the frequency-momentum space. We demonstrate these phenomena numerically and experimentally using a reflective non-local metasurface. These findings extend the concept of topological singularities to scattering matrices and pave the way for novel photonic devices and wavefront engineering techniques. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04666v1-abstract-full').style.display = 'none'; document.getElementById('2411.04666v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03399">arXiv:2411.03399</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03399">pdf</a>, <a href="https://arxiv.org/format/2411.03399">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Study of $D_{s1}(2460)^{+}\to D_{s}^{+}π^{+}π^{-}$ in $B\to {\bar{D}}^{(*)}D_{s}^{+}π^{+}π^{-}$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. 
Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1124 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03399v1-abstract-short" style="display: inline;"> An amplitude analysis of the $D_{s1}(2460)^+\to D_{s}^{+}π^{+}π^{-}$ transition is performed simultaneously in $B^{0}\to D^{-}D_{s}^{+}π^{+}π^{-}$, $B^{+}\to{\bar{D}}^{0} D_{s}^{+}π^{+}π^{-}$, and $B^{0}\to D^{*-}D_{s}^{+}π^{+}π^{-}$ decays. 
The study is based on a data sample of proton-proton collisions recorded with the LHCb detector at centre-of-mass energies of $\sqrt{s}=7,8,$ and $13\,$TeV, c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03399v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03399v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03399v1-abstract-full" style="display: none;"> An amplitude analysis of the $D_{s1}(2460)^+\to D_{s}^{+}π^{+}π^{-}$ transition is performed simultaneously in $B^{0}\to D^{-}D_{s}^{+}π^{+}π^{-}$, $B^{+}\to{\bar{D}}^{0} D_{s}^{+}π^{+}π^{-}$, and $B^{0}\to D^{*-}D_{s}^{+}π^{+}π^{-}$ decays. The study is based on a data sample of proton-proton collisions recorded with the LHCb detector at centre-of-mass energies of $\sqrt{s}=7,8,$ and $13\,$TeV, corresponding to a total integrated luminosity of $9\,\rm{fb}^{-1}$. A clear double-peak structure is observed in the $m(π^{+}π^{-})$ spectrum of the $D_{s1}(2460)^{+}\to D_{s}^{+}π^{+}π^{-}$ decay. The data can be described either with a model including $f_0(500)$, $f_0(980)$ and $f_2(1270)$ resonances, in which the contributions of $f_0(980)$ and $f_2(1270)$ are unexpectedly large, or with a model including $f_0(500)$, a doubly charged open-charm tetraquark state $T_{c\bar{s}}^{++}$ and its isospin partner $T_{c\bar{s}}^{0}$. If the former is considered implausible, the $T_{c\bar{s}}$ states are observed with high significance, and the data are consistent with isospin symmetry. When imposing isospin constraints between the two $T_{c\bar{s}}$ states, their mass and width are determined to be $2327\pm13\pm13\,$MeV and $96\pm16\,^{+170}_{-23}\,$MeV, respectively, where the first uncertainty is statistical and the second is systematic. The mass is slightly below the $DK$ threshold, and a spin-parity of $0^+$ is favoured with high significance. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03399v1-abstract-full').style.display = 'none'; document.getElementById('2411.03399v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3280/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-033, CERN-EP-2024-264 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02729">arXiv:2411.02729</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02729">pdf</a>, <a href="https://arxiv.org/format/2411.02729">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> </div> </div> <p class="title is-5 mathjax"> Analysis of Multi-epoch JWST Images of $\sim 300$ Little Red Dots: Tentative Detection of Variability in a Minority of Sources </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhang%2C+Z">Zijian Zhang</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+L">Linhua Jiang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Weiyang Liu</a>, <a href="/search/?searchtype=author&amp;query=Ho%2C+L+C">Luis C. 
Ho</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02729v1-abstract-short" style="display: inline;"> James Webb Space Telescope (JWST) has revealed a population of red and compact sources at $z \gtrsim 5$ known as &#34;Little Red Dots&#34; (LRDs) that are likely active galactic nuclei (AGNs). Here we present a comprehensive study of the variability of 314 LRDs with multi-epoch JWST observations in five deep fields: UDS, GOODS-S, GOODS-N, Abell 2744, and COSMOS. Our analyses use all publicly available JWS&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02729v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02729v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02729v1-abstract-full" style="display: none;"> James Webb Space Telescope (JWST) has revealed a population of red and compact sources at $z \gtrsim 5$ known as &#34;Little Red Dots&#34; (LRDs) that are likely active galactic nuclei (AGNs). Here we present a comprehensive study of the variability of 314 LRDs with multi-epoch JWST observations in five deep fields: UDS, GOODS-S, GOODS-N, Abell 2744, and COSMOS. Our analyses use all publicly available JWST NIRCam imaging data in these fields, together with multi-epoch JWST MIRI images available. We measure the significance of the variabilities (signal-to-noise ratio or ${\rm SNR}_{\rm var}$) for all LRDs and statistically evaluate their variabilities using the ${\rm SNR}_{\rm var}$ distributions. We pay particular attention to the systematic offsets of photometric zero points among different epochs that seem to commonly exist. 
The derived ${\rm SNR}_{\rm var}$ distributions of the LRDs, including those with broad H$α$/H$β$ emission lines, follow the standard Gaussian distribution, and are generally consistent with those of the comparison samples of objects detected in the same images. This finding suggests that the LRD population on average does not show strong variability, which can be due to super-Eddington accretion of the black holes in AGNs. Alternatively, they are dominated by galaxies. We also find eight strongly variable LRD candidates with variability amplitudes of 0.24 - 0.82 mag. The rest-frame optical SEDs of these variable LRDs should have significant AGN contribution. Future JWST observations will provide more variability information of LRDs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02729v1-abstract-full').style.display = 'none'; document.getElementById('2411.02729v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages, 15 figures, 6 tables. Submitted to ApJ. 
The light curves and cut-out images of all LRDs can be found at https://github.com/Zijian-astro/variable_LRD_candidates</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02265">arXiv:2411.02265</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02265">pdf</a>, <a href="https://arxiv.org/format/2411.02265">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Hunyuan-Large: An Open-Source MoE Model with 52 Billion Activated Parameters by Tencent </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Sun%2C+X">Xingwu Sun</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yanfeng Chen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+Y">Yiqing Huang</a>, <a href="/search/?searchtype=author&amp;query=Xie%2C+R">Ruobing Xie</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+J">Jiaqi Zhu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+K">Kai Zhang</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+S">Shuaipeng Li</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Z">Zhen Yang</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+J">Jonny Han</a>, <a href="/search/?searchtype=author&amp;query=Shu%2C+X">Xiaobo Shu</a>, <a href="/search/?searchtype=author&amp;query=Bu%2C+J">Jiahao Bu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Z">Zhongzhi Chen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+X">Xuemeng Huang</a>, <a href="/search/?searchtype=author&amp;query=Lian%2C+F">Fengzong Lian</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+S">Saiyong Yang</a>, <a 
href="/search/?searchtype=author&amp;query=Yan%2C+J">Jianfeng Yan</a>, <a href="/search/?searchtype=author&amp;query=Zeng%2C+Y">Yuyuan Zeng</a>, <a href="/search/?searchtype=author&amp;query=Ren%2C+X">Xiaoqin Ren</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+C">Chao Yu</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+L">Lulu Wu</a>, <a href="/search/?searchtype=author&amp;query=Mao%2C+Y">Yue Mao</a>, <a href="/search/?searchtype=author&amp;query=Xia%2C+J">Jun Xia</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+T">Tao Yang</a>, <a href="/search/?searchtype=author&amp;query=Zheng%2C+S">Suncong Zheng</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+K">Kan Wu</a> , et al. (83 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02265v3-abstract-short" style="display: inline;"> In this paper, we introduce Hunyuan-Large, which is currently the largest open-source Transformer-based mixture of experts model, with a total of 389 billion parameters and 52 billion activation parameters, capable of handling up to 256K tokens. We conduct a thorough evaluation of Hunyuan-Large&#39;s superior performance across various benchmarks including language understanding and generation, logica&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02265v3-abstract-full').style.display = 'inline'; document.getElementById('2411.02265v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02265v3-abstract-full" style="display: none;"> In this paper, we introduce Hunyuan-Large, which is currently the largest open-source Transformer-based mixture of experts model, with a total of 389 billion parameters and 52 billion activation parameters, capable of handling up to 256K tokens. 
We conduct a thorough evaluation of Hunyuan-Large&#39;s superior performance across various benchmarks including language understanding and generation, logical reasoning, mathematical problem-solving, coding, long-context, and aggregated tasks, where it outperforms LLama3.1-70B and exhibits comparable performance when compared to the significantly larger LLama3.1-405B model. Key practices of Hunyuan-Large include large-scale synthetic data that is orders larger than in previous literature, a mixed expert routing strategy, a key-value cache compression technique, and an expert-specific learning rate strategy. Additionally, we also investigate the scaling laws and learning rate schedule of mixture of experts models, providing valuable insights and guidance for future model development and optimization. The code and checkpoints of Hunyuan-Large are released to facilitate future innovations and applications. Codes: https://github.com/Tencent/Hunyuan-Large Models: https://huggingface.co/tencent/Tencent-Hunyuan-Large <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02265v3-abstract-full').style.display = 'none'; document.getElementById('2411.02265v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 4 Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02115">arXiv:2411.02115</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02115">pdf</a>, <a href="https://arxiv.org/format/2411.02115">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> FedMoE-DA: Federated Mixture of Experts via Domain Aware Fine-grained Aggregation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhan%2C+Z">Ziwei Zhan</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+W">Wenkuan Zhao</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yuanqing Li</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Weijie Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+X">Xiaoxi Zhang</a>, <a href="/search/?searchtype=author&amp;query=Tan%2C+C+W">Chee Wei Tan</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+C">Chuan Wu</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+D">Deke Guo</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xu Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02115v1-abstract-short" style="display: inline;"> Federated learning (FL) is a collaborative machine learning approach that enables multiple clients to train models without sharing their private data. 
With the rise of deep learning, large-scale models have garnered significant attention due to their exceptional performance. However, a key challenge in FL is the limitation imposed by clients with constrained computational and communication resourc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02115v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02115v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02115v1-abstract-full" style="display: none;"> Federated learning (FL) is a collaborative machine learning approach that enables multiple clients to train models without sharing their private data. With the rise of deep learning, large-scale models have garnered significant attention due to their exceptional performance. However, a key challenge in FL is the limitation imposed by clients with constrained computational and communication resources, which hampers the deployment of these large models. The Mixture of Experts (MoE) architecture addresses this challenge with its sparse activation property, which reduces computational workload and communication demands during inference and updates. Additionally, MoE facilitates better personalization by allowing each expert to specialize in different subsets of the data distribution. To alleviate the communication burdens between the server and clients, we propose FedMoE-DA, a new FL model training framework that leverages the MoE architecture and incorporates a novel domain-aware, fine-grained aggregation strategy to enhance the robustness, personalizability, and communication efficiency simultaneously. Specifically, the correlation between both intra-client expert models and inter-client data heterogeneity is exploited. 
Moreover, we utilize peer-to-peer (P2P) communication between clients for selective expert model synchronization, thus significantly reducing the server-client transmissions. Experiments demonstrate that our FedMoE-DA achieves excellent performance while reducing the communication pressure on the server. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02115v1-abstract-full').style.display = 'none'; document.getElementById('2411.02115v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=0" class="pagination-link is-current" aria-label="Go to page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=100" class="pagination-link " aria-label="Page 3">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=150" class="pagination-link " aria-label="Page 4">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Liu%2C+W&amp;start=200" class="pagination-link " aria-label="Page 5">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span 
class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a 
href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 
0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10